| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 - 53.2k chars) | int64 (0 - 721) | string (91 - 41.9k chars) | int64 (0 - 699) | int64 (0 - 1) |
def is_palindrome(head):
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
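# Usage sketch (illustrative; the checkers above assume but do not define a
# singly-linked list node, so a minimal one is provided here):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


if __name__ == "__main__":
    # build 1 -> 2 -> 2 -> 1 and verify it reads the same forwards and backwards
    head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
    print(is_palindrome_stack(head))  # True
    # note: is_palindrome(head) also works, but cuts the list in half as a side effect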
| 99 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
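# Illustrative note: the `_LazyModule` assignment above replaces this module in
# `sys.modules`, so `from transformers import XCLIPModel` only imports the heavy
# torch-backed submodule on first attribute access, while the `TYPE_CHECKING`
# branch keeps the symbols visible to static type checkers.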
| 190 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Loads a fairseq OPT checkpoint and normalizes its state dict for HF."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the QKV weight in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the fairseq weights into the HF OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
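# Usage sketch (the file and script names here are illustrative placeholders):
#
#   python convert_opt_checkpoint.py \
#       --fairseq_path restored.pt \
#       --pytorch_dump_folder_path ./opt-hf
#
# or, programmatically: convert_opt_checkpoint("restored.pt", "./opt-hf", config=None)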
| 711 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter for multiprocess logging: logs only on the main process by
    default, unless told otherwise via `main_process_only=False`.
    """

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
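# Usage sketch (mirrors accelerate's documented logging pattern; assumes an
# `Accelerator()` or `PartialState()` has already been created):
#
#   logger = get_logger(__name__)
#   logger.info("printed on the main process only")  # main_process_only defaults to True
#   logger.info("printed on every process", main_process_only=False)
#   logger.info("printed by each rank in turn", main_process_only=False, in_order=True)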
| 16 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks whether each process has finished: 1 if finished, 0 otherwise.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
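# Worked example of the selection rule above (illustrative): the response ratio is
# (waiting_time + burst_time) / burst_time. With an equal wait of 6 units, a job with
# burst 3 scores (6 + 3) / 3 = 3.0 while a job with burst 2 scores (6 + 2) / 2 = 4.0,
# so HRRN favors the shorter job while long waiters catch up as their ratio grows.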
| 699 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# audio sample used by the integration test below
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 661 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(0, holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
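# Quick illustration (matches the doctests above): pigeonhole sort allocates one
# "hole" per value in [min, max], so pigeon_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]
# and pigeon_sort([-2, -5, -45]) -> [-45, -5, -2]; it runs in O(n + value range) time.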
| 428 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Returns the optimal value for the current player (maximizer or minimizer) in a
    game tree of the given height whose leaves hold `scores`.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
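# Hand evaluation of main() above (illustrative): with scores
# [90, 23, 6, 33, 21, 65, 123, 34423] and height log2(8) = 3, the leaves reduce under
# max to [90, 33, 65, 34423], then under min to [33, 65], and the root max picks 65,
# so the program prints "Optimal value : 65".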
| 43 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
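# Usage sketch (standard transformers API; the checkpoint name comes from the map above):
#
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("Hello world")["input_ids"]  # [CLS]/[SEP] added by
#                                                # build_inputs_with_special_tokens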
| 704 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant schedule parsed from rules such as "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        lr_multiple = float(value_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
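# Usage sketch (assumes a toy model and optimizer, not part of the original module):
#
#   import torch
#
#   model = torch.nn.Linear(2, 2)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       optimizer.step()
#       scheduler.step()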
| 13 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfers the weights of `self.src` to `self.dest` by performing a forward pass
        using `x` as input; under the hood all operations in both modules are tracked.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a vissl RegNet trunk so it exposes named feature blocks."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Returns a function that creates the correct original (timm or vissl) model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Returns the correct Hugging Face RegNet class reference."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just
    # check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 225 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
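# Usage sketch (standard transformers config pattern):
#
#   config = GitConfig()                # builds a default GitVisionConfig internally
#   config.vision_config.image_size    # 224
#   config.to_dict()["model_type"]     # "git"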
| 134 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The original names of the three skipped overrides below were lost in the dump;
    # the names used here are the standard tokenizer-test hooks they appear to disable.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
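
# A minimal usage sketch of the tokenizer under test; running it needs network
# access (to fetch "junnyu/roformer_chinese_base") plus the rjieba package.
if __name__ == "__main__":
    tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
    print(tok.tokenize("今天天气非常好"))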
| 409 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 57 |
"""Count products of prime partitions (Project Euler-style problem)."""

from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all prime partitions of `number_to_partition`."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
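
# Worked example (hand-checked): the prime partitions of 7 are 7, 2 + 5 and
# 2 + 2 + 3, whose products are 7, 10 and 12 respectively, so:
#
#   >>> sorted(partition(7))
#   [7, 10, 12]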
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest number whose set of prime-partition products exceeds the threshold."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 186 | 0 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the leading indentation of `line` (empty string if there is none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of lines sharing `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap `key` so the resulting sort ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort object names: constants first, then classes, then functions, each alphabetically."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
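
# For example (hand-checked), constants sort first, then classes, then functions,
# each group alphabetically with case and underscores ignored:
#
#   >>> sort_objects(["zebra_fn", "CONSTANT_B", "AClass", "CONSTANT_A", "beta_fn"])
#   ['CONSTANT_A', 'CONSTANT_B', 'AClass', 'beta_fn', 'zebra_fn']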
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the object names inside one `_import_structure` statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
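
# For example (hand-checked), a one-line statement has only its bracketed object
# list reordered:
#
#   >>> sort_objects_in_import('_import_structure["models"].extend(["GPTNeoModel", "BertModel"])')
#   '_import_structure["models"].extend(["BertModel", "GPTNeoModel"])'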
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` blocks of one init file (or just report, if `check_only`)."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py found under the source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
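
# Typical invocations, assuming this script is saved as utils/custom_init_isort.py
# at the root of the repository (the exact path is an assumption):
#
#   python utils/custom_init_isort.py               # rewrite offending __init__.py files
#   python utils/custom_init_isort.py --check_only  # only fail if a file would change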
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_A = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 403 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of `input_list` in sorted order (ties return every mode)."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
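
# Worked example (hand-checked): 2 and 3 both appear twice in [1, 2, 2, 3, 3],
# so both are modes and are returned sorted:
#
#   >>> mode([1, 2, 2, 3, 3])
#   [2, 3]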
if __name__ == "__main__":
import doctest
doctest.testmod() | 403 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
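
# A minimal sketch of how the provider/session options exercised above plug into
# onnxruntime directly; the model path is hypothetical, so session creation is
# left commented out to keep the snippet side-effect free.
if is_onnx_available():
    _sess_options = ort.SessionOptions()
    _sess_options.enable_mem_pattern = False  # same setting as `gpu_options` above
    _providers = [
        ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"}),
    ]
    # session = ort.InferenceSession("model.onnx", _sess_options, providers=_providers)  # hypothetical path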
| 107 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
from ..utils import DummyObject, requires_backends
# NOTE: the original per-tokenizer class names in this dummy module were lost in the
# dump; the distinct placeholder names below are used only so the definitions no
# longer shadow one another. Each class raises a helpful ImportError at use time
# when the sentencepiece backend is missing.


class SentencePieceDummy0(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy1(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy2(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy3(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy4(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy5(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy6(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy7(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy8(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy9(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy10(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy11(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy12(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy13(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy14(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy15(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy16(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy17(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy18(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy19(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy20(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy21(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy22(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy23(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy24(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy25(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy26(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy27(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy28(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy29(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SentencePieceDummy30(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
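
# A minimal sketch of what these dummies do at runtime: instantiating one raises an
# ImportError that tells the user to install sentencepiece (nothing is raised if the
# backend is actually available). `_DemoTokenizer` is a hypothetical name.
class _DemoTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


if __name__ == "__main__":
    try:
        _DemoTokenizer()
    except ImportError as err:
        print(err)  # points at `pip install sentencepiece`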
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for BiT (Big Transfer) backbone models."""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
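
# A minimal usage sketch of the config defined above; the chosen stages are
# illustrative, not a recommendation.
if __name__ == "__main__":
    config = BitConfig(out_features=["stage1", "stage4"])
    print(config.layer_type)    # "preactivation" by default
    print(config.out_features)  # ["stage1", "stage4"]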
| 282 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerIntegrationTest(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
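
# A tiny self-contained illustration of the `-100` label convention used in
# `_map_to_encoder_decoder_inputs` above: pad positions are replaced by -100 so
# the cross-entropy loss ignores them. Values are illustrative.
def _demo_mask_pad_labels(labels, pad_token_id=0):
    return [[-100 if token == pad_token_id else token for token in seq] for seq in labels]


assert _demo_mask_pad_labels([[5, 7, 9, 0, 0]]) == [[5, 7, 9, -100, -100]]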
| 15 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPR context-encoder tokenizer (backed by the BERT fast tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPR question-encoder tokenizer (backed by the BERT fast tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the best answer spans for the extractive Q&A model, ranked by relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best non-overlapping answer spans for one passage."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Construct a "fast" DPR reader tokenizer (backed by the BERT fast tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
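
# A minimal usage sketch of the reader tokenizer defined above; running it downloads
# "facebook/dpr-reader-single-nq-base" (network access required).
if __name__ == "__main__":
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway."],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)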
| 15 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
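
# A small worked example of the default-mask rule implemented above: with
# pad_token_id = 0, every non-pad position receives a 1 (values illustrative).
#
#   >>> tf.cast(tf.math.not_equal(tf.constant([[5, 7, 0]]), 0), tf.int8)
#   <tf.Tensor: ... numpy=array([[1, 1, 0]], dtype=int8)>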
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a_ ="""google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
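# To exercise the slow integration test above locally, slow tests must be
# enabled explicitly, e.g. (assumed file location within the transformers repo):
#
#     RUN_SLOW=1 pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k batch_generation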
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
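# Minimal wiring sketch (the surrounding training script and `build_model` are
# hypothetical; only the quant_trainer calls below are defined in this module):
#
#     import argparse
#     import quant_trainer
#
#     parser = argparse.ArgumentParser()
#     quant_trainer.add_arguments(parser)
#     args = parser.parse_args()
#     quant_trainer.set_default_quantizers(args)  # must run before model creation
#     model = build_model(args)
#     quant_trainer.configure_model(model, args)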
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
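# Note on the weight descriptor above: axis=(0,) requests per-channel
# quantization, i.e. one amax (range) per output channel of each QuantLinear,
# whereas axis=None (the --quant-per-tensor case) keeps a single amax for the
# whole weight tensor. For a weight of shape (out_features, in_features) =
# (4, 8), per-channel calibration therefore yields 4 ranges instead of 1.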
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections
    are implemented with a single GEMM: force the scale factors to match by taking
    the max over (Q, K, V).
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized, by adjusting
    the amax of the following input_quantizer."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
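# Worked example of the axis reduction above: a Linear weight of shape
# (out, in) = (4, 8) quantized per axis 0 gives axis_set = {0} and
# reduce_axis = {1}, so reduce_amax collapses the input dimension and the
# resulting amax has shape (4, 1) -- one range per output channel.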
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where the layer name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
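# Usage sketch: disable every quantizer attached to a layer whose name matches
# the given pattern (the pattern itself is illustrative; matching uses
# re.search as in the loop above):
#
#     set_quantizer_by_name(model, ["LayerNorm"], _disabled=True)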
| 115 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
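    # How the expected split above arises from the merges written in setUp:
    # "lower" starts as "l o w e r</w>"; applying "l o" -> "lo", "lo w" ->
    # "low" and "e r</w>" -> "er</w>" leaves ["low", "er</w>"], which map to
    # vocab ids 14 and 15, with "<unk>" at id 20.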
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test the OpenAIGPTTokenizer when ftfy and spacy are available."""

    pass
| 17 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` independent
    trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
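# Worked example matching the demo below: P(X = 2) over 4 trials with
# p = 0.75 is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.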
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 128 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
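# Standalone sketch of the FaissIndex API exercised above (assumes faiss is
# installed; shapes are illustrative):
#
#     index = FaissIndex(string_factory="Flat")
#     index.add_vectors(np.random.rand(100, 5).astype(np.float32))
#     scores, ids = index.search(np.random.rand(5).astype(np.float32))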
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 706 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 486 | 0 |
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of the ring with odd side length sum to 4*odd**2 - 6*even
        total = total + 4 * odd**2 - 6 * even
    return total
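# Worked example for n = 5: the ring with side 3 contributes
# 4 * 3**2 - 6 * 2 = 24 (corners 3, 5, 7, 9) and the ring with side 5
# contributes 4 * 5**2 - 6 * 4 = 76 (corners 13, 17, 21, 25), so
# solution(5) = 1 + 24 + 76 = 101.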
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 88 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers, using the closed-form identities."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
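# Worked example for n = 10: sum of squares = 10*11*21/6 = 385 and
# square of sum = (10*11/2)**2 = 55**2 = 3025, so solution(10) = 2640.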
if __name__ == "__main__":
print(f'''{solution() = }''')
| 371 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])
    def test_process_empty_story(self):
        """An empty story returns empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])
    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())
    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())
    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())
    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 697 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 1 |
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the candidate Lychrel numbers below `limit`: numbers that do not
    produce a palindrome within 50 reverse-and-add iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
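# Worked example of the reverse-and-add loop: 349 needs three iterations,
# 349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337 (a palindrome),
# so 349 is not counted; 196 never reaches a palindrome within 50 iterations
# and is counted as a Lychrel candidate.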
if __name__ == "__main__":
print(F"{solution() = }") | 113 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
    fail_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 600 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file columns appear in the order col_3, col_1, col_2
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
if issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = jsonl_path
elif issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = [jsonl_path]
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
if split:
__lowerCAmelCase = {split: jsonl_path}
else:
__lowerCAmelCase = "train"
__lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
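    # Shape of the two outputs exercised above, for a toy two-row dataset:
    # lines=True writes JSON Lines, one object per line:
    #     {"col_1": "a", "col_2": 0}
    #     {"col_1": "b", "col_2": 1}
    # while lines=False with the default orient="records" writes one JSON array:
    #     [{"col_1": "a", "col_2": 0}, {"col_1": "b", "col_2": 1}]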
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
__lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
__lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
assert exported_content == original_content | 39 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
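# Usage sketch (hypothetical file name): decode any ffmpeg-readable file to a
# mono float32 waveform at 16 kHz.
#
#     with open("speech.mp3", "rb") as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16000)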
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
if format_for_conversion == "s16le":
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__lowerCAmelCase = platform.system()
if system == "Linux":
__lowerCAmelCase = "alsa"
__lowerCAmelCase = "default"
elif system == "Darwin":
__lowerCAmelCase = "avfoundation"
__lowerCAmelCase = ":0"
elif system == "Windows":
__lowerCAmelCase = "dshow"
__lowerCAmelCase = "default"
__lowerCAmelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks as dicts of numpy audio, stride and sampling rate."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk an iterator of raw bytes into fixed-size chunks with left/right stride overlap."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
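# Hedged illustration of the striding contract above (example values assumed):
# with chunk_len=6 and stride=(2, 2),
#   list(chunk_bytes_iter(iter([b"abcdefgh"]), 6, (2, 2)))
# yields {"raw": b"abcdef", "stride": (0, 2)}, then {"raw": b"cdefgh", "stride": (2, 2)},
# and finally {"raw": b"efgh", "stride": (2, 0)} for the leftover bytes.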
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Read chunks of `buflen` bytes from the stdout of a running ffmpeg process."""
    bufsize = 2**24  # 16MB
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
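# Hedged note (assumed behavior of the shared dummy-object pattern, not stated in this
# file): instantiating the placeholder class above without `keras_nlp` installed makes
# `requires_backends` raise an ImportError that names the missing backend.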
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
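# Hedged usage sketch (mirrors the fixtures built in setUp above; not part of the
# original test file):
#   processor = AlignProcessor(
#       tokenizer=BertTokenizer(vocab_file), image_processor=EfficientNetImageProcessor()
#   )
#   batch = processor(text="lower newer", images=images, return_tensors="np")
#   # -> input_ids / token_type_ids / attention_mask / pixel_values in one BatchEncoding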
"""simple docstring"""
import itertools
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ) -> int:
__a = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def lowercase ( lowerCAmelCase__ : int = 10001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , lowerCAmelCase__ ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
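# Quick sanity checks (standard values for this Project Euler problem):
#   solution(6) == 13
#   solution() == 104743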
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint callback that keeps the best models by a validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
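# Hedged wiring sketch (trainer arguments assumed, not from this file): the factories
# above are meant to be passed to a pytorch_lightning Trainer together with the
# logging callback defined below.
#
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir="out", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#           Seq2SeqLoggingCallback(),
#       ],
#   )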
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
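# Hedged usage sketch (public checkpoint id; waveform array assumed, not part of the
# original module):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="hello world")
#   # -> batch["input_features"] for the encoder, batch["labels"] for teacher forcing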
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
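# Hedged note on the lazy-import pattern above (standard _LazyModule behavior):
# `from transformers.models.deit import DeiTModel` only imports `modeling_deit`
# on first attribute access, which keeps `import transformers` itself cheap.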
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention 2D downsampling block."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block without attention."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention 2D upsampling block."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block without attention."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Cross-attention 2D mid block: one resnet followed by alternating attention/resnet pairs."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
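# Hedged smoke-test sketch (shapes and temb width assumed; channels-last layout as in
# the Flax diffusers blocks; not part of the original module):
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   hidden_states = jnp.zeros((1, 8, 8, 32))
#   temb = jnp.zeros((1, 512))
#   params = block.init(jax.random.PRNGKey(0), hidden_states, temb)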
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
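# Hedged usage sketch (checkpoint id taken from the map above; not part of the
# original module): spaces and newlines travel through SentencePiece as the
# U+2582/U+2583 sentinels set up in __init__ and are restored by `_decode`.
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   text = tokenizer.decode(tokenizer.encode("你好 世界"))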
| 656 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
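    # Note (added comment): the attention shapes asserted above follow from SegFormer's
    # design -- the first block downsamples the image by 4, so queries span (H/4)**2
    # tokens while keys/values are shrunk by that block's sr_ratio; the last block
    # downsamples by 32 overall.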
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCAmelCase ( self : Tuple ):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
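# Note (added comment): post_process_semantic_segmentation returns one (H, W) label map
# per image; passing target_sizes resizes predictions back to the requested resolution,
# otherwise the maps stay at the model's logit resolution (128 x 128 above).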
| 656 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
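# Note (added comment): the md5 digest above is used only as a cheap fingerprint of the
# rendered depth image, so tests can compare outputs without storing image fixtures.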
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ] )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ], outputs, )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 714 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
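    # Note (added comment): the equivalence check above guards the contract that calling
    # a TF model with return_dict=False returns the same tensors, in the same order, as
    # ModelOutput.to_tuple() does when return_dict=True.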
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 103 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__( self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg key below is kept on purpose for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
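# A minimal usage sketch (added for illustration, not part of the original file):
#
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
#   OpenLlamaConfig(rope_scaling={"type": "bogus", "factor": 2.0})            # raises ValueError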
| 74 |
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def decimal_to_hexadecimal(decimal):
    """
    Take a positive or negative whole number `decimal` and return its hexadecimal string.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
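# Example usage (added for illustration; the doctests above cover the same cases):
#   print(decimal_to_hexadecimal(255))   # 0xff
#   print(decimal_to_hexadecimal(-3.0))  # -0x3, whole-valued floats pass the assert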
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__( self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80, fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank features for one waveform array."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
    def __call__( self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio( self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
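# A minimal usage sketch (added for illustration; shapes are approximate and assume a
# 1-second, 16 kHz waveform):
#
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
#   # inputs["input_values"]:  (1, 16000) raw samples
#   # targets["input_values"]: (1, num_frames, 80) log-mel features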
| 367 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 367 | 1 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling polynomial hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
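# Worked example of the rolling-hash update above (added for illustration): for text
# "abc" with a window of length 2, moving from "ab" to "bc" costs O(1):
#   hash("bc") = (hash("ab") - ord("a") * modulus_power) * alphabet_size + ord("c")  (mod modulus)
# where modulus_power == alphabet_size ** (window_length - 1), instead of re-hashing
# the whole window.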
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 484 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 613 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )  # `lowerCamelCase__` is the expected-encoding dict assigned above
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __magic_name__ ( self ):
lowerCamelCase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowerCamelCase__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ):
lowerCamelCase__ = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors="pt" )
lowerCamelCase__ = self.tokenizer(
text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors="pt" )
lowerCamelCase__ = targets["input_ids"]
lowerCamelCase__ = shift_tokens_right(_lowerCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ):
lowerCamelCase__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[25_0004, 62, 3034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_0001,
} , )
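# --- Illustrative sketch (added; not part of the original test file, and it
# requires network access so it is left commented out). MBart-50 encodes the
# source as [src_lang_code] X [eos] and starts generation from the target
# language code, which is exactly what the assertions above check:
#
#   tok = MBartaaTokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   tok("A test").input_ids               # -> [EN_CODE, ..., tok.eos_token_id]
#   tok.prefix_tokens, tok.suffix_tokens  # -> [EN_CODE], [tok.eos_token_id]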
| 712 |
from __future__ import annotations


class BoyerMooreSearch:
    """Bad-character heuristic of the Boyer-Moore string search algorithm."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment,
        or -1 if the pattern matches fully at current_pos."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
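# --- Sanity check (added for illustration): the heuristic's reported positions
# match a naive substring scan for this input.
def naive_positions(text: str, pattern: str) -> list[int]:
    return [
        i
        for i in range(len(text) - len(pattern) + 1)
        if text[i : i + len(pattern)] == pattern
    ]


# Both find "AB" at indices [0, 3].
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == naive_positions(
    "ABAABA", "AB"
)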
| 360 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def __snake_case ( _lowercase ):
"""simple docstring"""
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 34 |
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all board positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
    """Extend the tour by backtracking over all valid knight moves."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
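# --- Example (added for illustration): open_knight_tour(1) trivially returns
# [[1]], and open_knight_tour(5) returns a 5x5 board whose entries 1..25 trace
# one open knight's tour. Sizes for which no tour exists raise ValueError.
#   >>> open_knight_tour(1)
#   [[1]]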
| 658 | 0 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model, using a pretrained config.

    Extra kwargs are forwarded to AutoConfig.from_pretrained.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
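# --- Example invocation (added for illustration; the script filename is
# hypothetical):
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-random --d_model=64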
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
| 717 |
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Determine whether a two-dimensional polygon can exist with the given side
    lengths: the longest side must be shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
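# --- Example (added for illustration): the longest side must be strictly
# shorter than the sum of the others.
assert check_polygon([3, 4, 5])      # 5 < 3 + 4
assert not check_polygon([1, 1, 3])  # 3 >= 1 + 1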
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 224 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: the number of reduced proper fractions n/d with
    d <= limit equals the sum of Euler's totient phi(d) for d = 2..limit."""
    # Sieve of Eratosthenes over the odd numbers (plus 2)
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * prod(1 - 1/p) over the primes p dividing n
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
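# --- Sanity check (added for illustration): Project Euler 72's statement lists
# 21 reduced proper fractions for d <= 8, and indeed sum(phi(d), d=2..8) = 21.
#   >>> solution(8)
#   21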
if __name__ == "__main__":
print(f'''{solution() = }''')
| 64 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_A : List[str] = get_logger(__name__)
class a__ :
__lowerCAmelCase = """dummy_data"""
__lowerCAmelCase = """datasets"""
__lowerCAmelCase = False
def __init__( self , _a , _a , _a , _a = None , _a = False , _a = True , _a = None , ):
lowercase : int = 0
lowercase : Optional[Any] = dataset_name
lowercase : List[str] = cache_dir
lowercase : Union[str, Any] = use_local_dummy_data
lowercase : str = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : List[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Tuple = str(_a )
# to be downloaded
lowercase : Tuple = None
lowercase : List[Any] = None
@property
def __magic_name__ ( self ):
if self._dummy_file is None:
lowercase : Optional[int] = self.download_dummy_data()
return self._dummy_file
@property
def __magic_name__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def __magic_name__ ( self ):
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def __magic_name__ ( self ):
lowercase : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : str = cached_path(
_a , cache_dir=self.cache_dir , extract_compressed_file=_a , force_extract=_a )
return os.path.join(_a , self.dummy_file_name )
@property
def __magic_name__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __magic_name__ ( self ):
if self._bucket_url is None:
lowercase : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def __magic_name__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def __magic_name__ ( self , _a , *_a ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Optional[int] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_a , _a ):
return self.create_dummy_data_dict(_a , _a )
elif isinstance(_a , (list, tuple) ):
return self.create_dummy_data_list(_a , _a )
else:
return self.create_dummy_data_single(_a , _a )
def __magic_name__ ( self , _a , *_a ):
return self.download_and_extract(_a )
def __magic_name__ ( self , _a , _a ):
return self.download_and_extract(_a )
def __magic_name__ ( self , _a , *_a , **_a ):
return path
def __magic_name__ ( self ):
return {}
def __magic_name__ ( self , _a , _a ):
lowercase : List[str] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_a , _a ):
for single_url in single_urls:
download_callback(_a )
else:
lowercase : Union[str, Any] = single_urls
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_a , _a ):
lowercase : Any = [os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Tuple = os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) )
lowercase : List[str] = value
# make sure that values are unique
if all(isinstance(_a , _a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __magic_name__ ( self , _a , _a ):
lowercase : Union[str, Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Any = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , _a ) ) for url in data_url )
lowercase : List[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : Tuple = [data_url[0]] * len(_a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Union[str, Any] = os.path.join(_a , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(_a )
return dummy_data_list
def __magic_name__ ( self , _a , _a ):
for download_callback in self.download_callbacks:
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Tuple = os.path.join(_a , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(_a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
pass
def __magic_name__ ( self , _a ):
def _iter_archive_members(_a ):
# this preserves the order of the members inside the ZIP archive
lowercase : Optional[int] = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_a )
lowercase : Union[str, Any] = Path(_a )
lowercase : List[Any] = _iter_archive_members(_a ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(_a ).as_posix(), file_path.open("rb" )
def __magic_name__ ( self , _a ):
if not isinstance(_a , _a ):
lowercase : Any = [paths]
for path in paths:
if os.path.isfile(_a ):
if os.path.basename(_a ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_a ):
if os.path.basename(_a ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(_a ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(_a , _a )
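# --- Hypothetical usage sketch (added for illustration; upstream this class is
# datasets' MockDownloadManager, and the exact constructor arguments below are
# an assumption). A dataset test would instantiate it so that
# download_and_extract() resolves URLs inside a local dummy_data.zip instead of
# hitting the network:
#
#   dl_manager = a__("squad", None, Version("1.0.0"))
#   local_path = dl_manager.download_and_extract("https://example.com/train.json")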
| 361 | 0 |
'''simple docstring'''

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.'
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
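# --- Example (added for illustration): a valid 3-node BST vs. one that
# violates the ordering invariant.
#
#        2                 2
#      /   \             /   \
#     1     3           3     1
#
assert is_binary_search_tree(TreeNode(2, TreeNode(1), TreeNode(3)))
assert not is_binary_search_tree(TreeNode(2, TreeNode(3), TreeNode(1)))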
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = ['input_ids', 'attention_mask']
lowerCamelCase_ = None
def __init__( self : List[str] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]="<unk>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : List[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<pad>" , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : int=False , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowercase : str =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase__ ) != add_prefix_space:
lowercase : int =getattr(UpperCAmelCase__ , pre_tok_state.pop('''type''' ) )
lowercase : int =add_prefix_space
lowercase : Optional[Any] =pre_tok_class(**UpperCAmelCase__ )
lowercase : Optional[Any] =add_prefix_space
def lowerCamelCase_ ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =kwargs.get('''is_split_into_words''' , UpperCAmelCase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Dict =kwargs.get('''is_split_into_words''' , UpperCAmelCase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
lowercase : Dict =self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : "Conversation" ):
'''simple docstring'''
lowercase : Any =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [self.eos_token_id] )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowercase : Any =input_ids[-self.model_max_length :]
return input_ids
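# --- Illustrative round-trip (added; the class above is transformers'
# BloomTokenizerFast upstream, and this requires downloading the public
# checkpoint, so it is left commented out):
#   tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#   ids = tok("Hello world").input_ids
#   assert tok.decode(ids) == "Hello world"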
| 92 |
"""simple docstring"""
__A : Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__A : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__A : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 499 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __magic_name__ ( unittest.TestCase):
def _UpperCAmelCase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" ,revision="bf16" ,dtype=jnp.bfloataa ,)
UpperCAmelCase = "A painting of a squirrel eating a burger"
UpperCAmelCase = jax.device_count()
UpperCAmelCase = num_samples * [prompt]
UpperCAmelCase = sd_pipe.prepare_inputs(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = replicate(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = shard(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = jax.random.PRNGKey(0 )
UpperCAmelCase = jax.random.split(__SCREAMING_SNAKE_CASE ,jax.device_count() )
UpperCAmelCase = sd_pipe(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,num_inference_steps=2_5 ,jit=__SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = "stabilityai/stable-diffusion-2"
UpperCAmelCase , UpperCAmelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ,subfolder="scheduler" )
UpperCAmelCase , UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE ,revision="bf16" ,dtype=jnp.bfloataa ,)
UpperCAmelCase = scheduler_params
UpperCAmelCase = "A painting of a squirrel eating a burger"
UpperCAmelCase = jax.device_count()
UpperCAmelCase = num_samples * [prompt]
UpperCAmelCase = sd_pipe.prepare_inputs(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = replicate(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = shard(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = jax.random.PRNGKey(0 )
UpperCAmelCase = jax.random.split(__SCREAMING_SNAKE_CASE ,jax.device_count() )
UpperCAmelCase = sd_pipe(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,num_inference_steps=2_5 ,jit=__SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
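# --- Pattern note (added for illustration): both tests follow the standard
# Flax multi-device recipe -- replicate the params once, shard the tokenized
# prompts across devices, and split the PRNG key per device:
#
#   params = replicate(params)
#   prompt_ids = shard(pipeline.prepare_inputs([prompt] * jax.device_count()))
#   rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())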
| 706 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest (non-strictly) increasing subsequence of the array."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
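# --- Example (added for illustration; value taken from the upstream doctests
# of this algorithm):
#   >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   [10, 22, 33, 41, 60, 80]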
if __name__ == "__main__":
import doctest
doctest.testmod()
| 405 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
a_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
a_ = '''▁'''
# Segments (not really needed)
a_ = 0
a_ = 1
a_ = 2
a_ = 3
a_ = 4
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ ="""left"""
a_ =XLNetTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , **__UpperCAmelCase , )-> Any:
'''simple docstring'''
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = 3
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = remove_space
lowerCAmelCase__ = keep_accents
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = False if not self.vocab_file else True
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 339 |
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Req = R1 + R2 + ... + Rn"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
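# --- Example (added for illustration): two 4-ohm resistors give
# 1 / (1/4 + 1/4) = 2 ohms in parallel and 4 + 4 = 8 ohms in series.
assert resistor_parallel([4, 4]) == 2.0
assert resistor_series([4, 4]) == 8.0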
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Union[str, Any]= {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any= [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_a : Any= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
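# --- Note (added for illustration): _LazyModule defers importing the heavy
# torch implementations until an attribute is first accessed, e.g.
# `from transformers.models.jukebox import JukeboxConfig` resolves lazily
# through the _import_structure mapping above.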
| 192 |

"""simple docstring"""

import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
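# --- Worked example (added for illustration; the expected strings come from
# the upstream doctests of this cipher and are an assumption here):
#   >>> hill = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   >>> hill.encrypt('testing hill cipher')
#   'WHXYJOLM9C6XT085LL'
#   >>> hill.decrypt('WHXYJOLM9C6XT085LL')
#   'TESTINGHILLCIPHERR'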
| 192 | 1 |
'''simple docstring'''

from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    lists_lengths = {
        key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)
    }
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items())
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split num_shards contiguously across at most max_num_jobs groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the shard lists in gen_kwargs into at most max_num_jobs dicts."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Randomly shuffle the shard lists of gen_kwargs in a reproducible way."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
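# --- Illustration (added): distributing 10 shards over 3 jobs yields
# contiguous ranges of sizes 4, 3 and 3.
assert _distribute_shards(num_shards=10, max_num_jobs=3) == [
    range(0, 4),
    range(4, 7),
    range(7, 10),
]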
| 3 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowercase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : Dict = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowercase : List[Any] = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowercase : Tuple = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _a (a__ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Optional[int] = LxmertTokenizer
def __init__( self ,__a=None ,__a=None ,__a=True ,__a="[UNK]" ,__a="[SEP]" ,__a="[PAD]" ,__a="[CLS]" ,__a="[MASK]" ,__a=True ,__a=None ,**__a ,) -> str:
super().__init__(
__a ,tokenizer_file=__a ,do_lower_case=__a ,unk_token=__a ,sep_token=__a ,pad_token=__a ,cls_token=__a ,mask_token=__a ,tokenize_chinese_chars=__a ,strip_accents=__a ,**__a ,)
snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,__a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,__a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,__a ) != tokenize_chinese_chars
):
snake_case : Union[str, Any] = getattr(__a ,normalizer_state.pop("""type""" ) )
snake_case : Optional[int] = do_lower_case
snake_case : Optional[int] = strip_accents
snake_case : List[Any] = tokenize_chinese_chars
snake_case : Union[str, Any] = normalizer_class(**__a )
snake_case : str = do_lower_case
def snake_case_ ( self ,__a ,__a=None ) -> Optional[int]:
snake_case : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self ,__a ,__a = None ) -> List[int]:
snake_case : int = [self.sep_token_id]
snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self ,__a ,__a = None ) -> Tuple[str]:
snake_case : Optional[int] = self._tokenizer.model.save(__a ,name=__a )
return tuple(__a )
| 116 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
A = trt.Logger(trt.Logger.WARNING)
A = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
A = logging.getLogger(__name__)
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
A = parser.parse_args()
if args.tokenizer_name:
A = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
A = args.per_device_eval_batch_size
A = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
A = True
A = 'temp_engine/bert-fp32.engine'
if args.fpaa:
A = 'temp_engine/bert-fp16.engine'
if args.inta:
A = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
A = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
A = [network.get_input(i) for i in range(network.num_inputs)]
A = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
A = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
A = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
A = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _lowerCamelCase( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.asarray(inputs['input_ids'] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Dict = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase__ )
# start time
SCREAMING_SNAKE_CASE_ : str = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCAmelCase__ ) for d_inp in d_inputs] + [int(lowerCAmelCase__ ), int(lowerCAmelCase__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
cuda.memcpy_dtoh_async(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
SCREAMING_SNAKE_CASE_ : Any = time.time()
SCREAMING_SNAKE_CASE_ : List[Any] = end_time - start_time
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
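# --- Note (added for clarity): model_infer above follows the canonical
# TensorRT pattern -- host-to-device copies of the three integer input tensors
# (memcpy_htod_async), an asynchronous enqueue via context.execute_async() on a
# CUDA stream, device-to-host copies of the start/end logit buffers
# (memcpy_dtoh_async), then stream.synchronize() before the host reads them.
# The timer starts after the input copies, so infer_time covers execution plus
# output transfer and synchronization.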
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
A = raw_datasets['validation'].column_names
A = 'question' if 'question' in column_names else column_names[0]
A = 'context' if 'context' in column_names else column_names[1]
A = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
A = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
A = min(args.max_seq_length, tokenizer.model_max_length)
def _lowerCamelCase( lowerCAmelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=lowerCAmelCase__ , stride=args.doc_stride , return_overflowing_tokens=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
SCREAMING_SNAKE_CASE_ : Dict = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenized_examples.sequence_ids(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
SCREAMING_SNAKE_CASE_ : Optional[int] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
SCREAMING_SNAKE_CASE_ : Tuple = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
A = raw_datasets['validation']
# Validation Feature Creation
A = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
A = default_data_collator
A = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
A = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]

    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')

# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate pinned host buffers and device memory for the start/end logits outputs.
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
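    # The `model_infer` helper used in the loop below is defined earlier in the full script and is
    # not shown in this excerpt. The sketch below shows what such a helper could look like; the
    # binding order and input names are assumptions, so the sketch is never called here.
    def _model_infer_sketch(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
        # Copy the input tensors host -> device (assumed input names).
        for d_input, name in zip(d_inputs, ['input_ids', 'attention_mask', 'token_type_ids']):
            cuda.memcpy_htod_async(d_input, np.asarray(batch[name], dtype=np.int32, order='C'), stream)
        start = timeit.default_timer()
        # Run inference asynchronously on the stream.
        context.execute_async_v2(
            bindings=[int(d) for d in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
        )
        # Copy the start/end logits device -> host and wait for all work to finish.
        cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
        cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
        stream.synchronize()
        return (h_output0, h_output1), timeit.default_timer() - start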
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f'  Num examples = {len(eval_dataset)}')
    logger.info(f'  Batch size = {args.per_device_eval_batch_size}')

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    eval_time = timeit.default_timer() - start_time
    logger.info('  Evaluation done in total %f secs (%f sec per example)', eval_time, eval_time / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inferences = %d', niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'Evaluation metrics: {eval_metric}')
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for simulating a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the chain for `steps` transitions from `start` and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
    import doctest

    doctest.testmod()
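    # Small usage sketch (illustrative values, not from the original module): a "sticky"
    # two-state chain where state 'a' retains itself 90% of the time.
    example_transitions = [('a', 'a', 0.9), ('a', 'b', 0.1), ('b', 'a', 0.5), ('b', 'b', 0.5)]
    print(get_transitions('a', example_transitions, 1000))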
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2', )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turtle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
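# Illustrative sketch (not part of the original tests): the dummy components built by
# get_dummy_components() can be assembled into a pipeline directly, which is roughly what the
# shared pipeline test mixins do internally. Treat this as an assumption-based example:
#
#   pipe = StableUnCLIPPipeline(**components)  # components as returned by get_dummy_components()
#   image = pipe(**inputs).images[0]           # inputs as returned by get_dummy_inputs(device)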
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
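if __name__ == "__main__":
    # Quick usage sketch (not part of the test suite): run the same public checkpoint that the
    # integration test above exercises on a toy sentence. Requires TensorFlow and network access.
    from transformers import DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
    encoded = tokenizer("Hello world", return_tensors="tf")
    print(model(**encoded).last_hidden_state.shape)  # (1, 4, 768) including special tokens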
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
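# Sketch of the past-window arithmetic used by prepare_autoformer_inputs_dict above: the model
# needs max(lags_sequence) extra steps of history so every lagged copy of the series covers the
# full context window. With the defaults in AutoformerModelTester:
#
#   _past_length = 14 + max([1, 2, 3, 4, 5])  # context_length + largest lag = 19 time steps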
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
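if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): CANINE operates
    # directly on unicode code points, so bos/eos ids default to private-use points 0xE000/0xE001.
    config = CanineConfig()
    print(config.hidden_size, hex(config.bos_token_id), hex(config.eos_token_id))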
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="""session""" )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope="""session""" )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="""session""" )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="""session""" )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="""session""" )
def tar_file(tmp_path_factory, text_file):
    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="""session""" )
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def zip_file(tmp_path_factory, text_file):
    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="""session""" )
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="""session""" )
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="""session""" )
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="""session""" )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
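# Consumption sketch (illustrative, not a fixture): a test can read the file back with sqlite3.
#
#   def test_sqlite_roundtrip(sqlite_path):
#       with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
#           rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()
#       assert len(rows) == len(DATA)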
@pytest.fixture(scope="""session""" )
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="""session""" )
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="""session""" )
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope="""session""" )
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="""session""" )
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="""session""" )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
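# Read-back sketch (illustrative, not a fixture): the file written above can be checked directly
# with pyarrow.
#
#   table = pq.read_table(parquet_path)
#   assert table.num_rows == len(DATA)
#   assert table.column_names == ["col_1", "col_2", "col_3"]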
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
UpperCAmelCase_ : Union[str, Any] = {"""data""": DATA}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
UpperCAmelCase_ : Optional[int] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase ) -> Dict:
"""simple docstring"""
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase ) -> str:
"""simple docstring"""
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase ) -> int:
"""simple docstring"""
UpperCAmelCase_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Tuple = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase , lowercase ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def A_ ( lowercase ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def A_ ( ) -> Optional[int]:
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def A_ ( ) -> Tuple:
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def A_ ( lowercase , lowercase ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 470 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = ["""pixel_values"""]
def __init__( self , a__ = True , a__ = None , a__ = PIL.Image.BICUBIC , a__ = True , a__ = None , a__ = 1 / 2_55 , a__ = True , a__ = True , a__ = None , a__ = None , **a__ , ):
super().__init__(**a__ )
_UpperCAmelCase = size if size is not None else {'height': 2_56, 'width': 2_56}
_UpperCAmelCase = get_size_dict(a__ )
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(a__ , param_name='crop_size' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self , a__ , a__ , a__ = PIL.Image.BICUBIC , a__ = None , **a__ , ):
_UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
a__ , size=(size['height'], size['width']) , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(a__ , size=(size['height'], size['width']) , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__=None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(a__ )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(a__ , param_name='crop_size' )
_UpperCAmelCase = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(a__ ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(a__ , a__ ) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=a__ , tensor_type=a__ )
| 494 |
"""simple docstring"""
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return number | (1 << position)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 494 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_UpperCamelCase : Tuple = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def __snake_case ( lowerCAmelCase : Any ):
__UpperCAmelCase = torch.load(lowerCAmelCase , map_location='cpu' )
return sd
def __snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : List[Any]=rename_keys_prefix ):
__UpperCAmelCase = OrderedDict()
__UpperCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__UpperCAmelCase = key
for name_pair in rename_keys_prefix:
__UpperCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
__UpperCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
__UpperCAmelCase = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def __snake_case ( lowerCAmelCase : Any , lowerCAmelCase : Dict ):
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
__UpperCAmelCase = 'pretraining'
if "vcr" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 1024}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 512}
__UpperCAmelCase = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 2048}
__UpperCAmelCase = 'vqa_advanced'
elif "vqa" in checkpoint_path:
__UpperCAmelCase = {'visual_embedding_dim': 2048, 'num_labels': 3129}
__UpperCAmelCase = 'vqa'
elif "nlvr" in checkpoint_path:
__UpperCAmelCase = {
'visual_embedding_dim': 1024,
'num_labels': 2,
}
__UpperCAmelCase = 'nlvr'
__UpperCAmelCase = VisualBertConfig(**lowerCAmelCase )
# Load State Dict
__UpperCAmelCase = load_state_dict(lowerCAmelCase )
__UpperCAmelCase = get_new_dict(lowerCAmelCase , lowerCAmelCase )
if model_type == "pretraining":
__UpperCAmelCase = VisualBertForPreTraining(lowerCAmelCase )
elif model_type == "vqa":
__UpperCAmelCase = VisualBertForQuestionAnswering(lowerCAmelCase )
elif model_type == "nlvr":
__UpperCAmelCase = VisualBertForVisualReasoning(lowerCAmelCase )
elif model_type == "multichoice":
__UpperCAmelCase = VisualBertForMultipleChoice(lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
# Save Checkpoints
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_UpperCamelCase : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 396 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ = '''config.json'''
lowerCamelCase_ = '''diffusion_pytorch_model.bin'''
lowerCamelCase_ = '''diffusion_flax_model.msgpack'''
lowerCamelCase_ = '''model.onnx'''
lowerCamelCase_ = '''diffusion_pytorch_model.safetensors'''
lowerCamelCase_ = '''weights.pb'''
lowerCamelCase_ = '''https://huggingface.co'''
lowerCamelCase_ = default_cache_path
lowerCamelCase_ = '''diffusers_modules'''
lowerCamelCase_ = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
lowerCamelCase_ = ['''fp16''', '''non-ema''']
lowerCamelCase_ = '''.self_attn''' | 161 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 161 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Any=False) -> Tuple:
'''simple docstring'''
_lowercase : Tuple = 'backbone.' if is_semantic else ''
_lowercase : List[Any] = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
])
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
])
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
])
return rename_keys
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Optional[int]=False) -> int:
'''simple docstring'''
for i in range(config.num_hidden_layers):
_lowercase : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
_lowercase : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''')
_lowercase : Any = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''')
_lowercase : List[str] = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''')
_lowercase : Any = in_proj_weight[
: config.hidden_size, :
]
_lowercase : Optional[Any] = q_bias
_lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowercase : List[Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
_lowercase : List[str] = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''')
_lowercase : str = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''')
_lowercase : Optional[Any] = gamma_a
_lowercase : Dict = gamma_a
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any]) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = dct.pop(lowerCAmelCase__)
_lowercase : str = val
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Tuple = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__).raw)
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str=False) -> Optional[int]:
'''simple docstring'''
_lowercase : List[str] = False if 'rvlcdip' in checkpoint_url else True
_lowercase : List[str] = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase__ , use_mask_token=lowerCAmelCase__)
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
_lowercase : Any = 10_24
_lowercase : List[str] = 40_96
_lowercase : Tuple = 24
_lowercase : Optional[Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
_lowercase : List[str] = 16
_lowercase : Optional[Any] = 'huggingface/label-files'
_lowercase : Tuple = 'rvlcdip-id2label.json'
_lowercase : List[Any] = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset') , 'r'))
_lowercase : Optional[Any] = {int(lowerCAmelCase__): v for k, v in idalabel.items()}
_lowercase : Dict = idalabel
_lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
_lowercase : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='cpu')['model']
_lowercase : List[str] = create_rename_keys(lowerCAmelCase__ , has_lm_head=lowerCAmelCase__)
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ , has_lm_head=lowerCAmelCase__)
# load HuggingFace model
_lowercase : Any = BeitForMaskedImageModeling(lowerCAmelCase__) if has_lm_head else BeitForImageClassification(lowerCAmelCase__)
model.eval()
model.load_state_dict(lowerCAmelCase__)
# Check outputs on an image
_lowercase : Union[str, Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase__)
_lowercase : Dict = prepare_img()
_lowercase : Optional[int] = image_processor(images=lowerCAmelCase__ , return_tensors='pt')
_lowercase : Any = encoding['pixel_values']
_lowercase : str = model(lowerCAmelCase__)
_lowercase : Dict = outputs.logits
# verify logits
_lowercase : str = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(lowerCAmelCase__), "Shape of logits not as expected"
Path(lowerCAmelCase__).mkdir(exist_ok=lowerCAmelCase__)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(lowerCAmelCase__)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(lowerCAmelCase__)
if push_to_hub:
if has_lm_head:
_lowercase : Union[str, Any] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
_lowercase : Tuple = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCAmelCase__ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCAmelCase__ , )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
A = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 125 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = RobertaTokenizer
lowerCAmelCase__ : Tuple = RobertaTokenizerFast
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Optional[int] = {"cls_token": "<s>"}
def _lowerCamelCase ( self : Any ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowercase : Tuple = dict(zip(UpperCamelCase ,range(len(UpperCamelCase ) ) ) )
_lowercase : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowercase : List[str] = {'unk_token': '<unk>'}
_lowercase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def _lowerCamelCase ( self : Dict ,**UpperCamelCase : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCamelCase )
def _lowerCamelCase ( self : int ,**UpperCamelCase : List[Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] ,UpperCamelCase : Tuple ) -> Union[str, Any]:
_lowercase : int = 'lower newer'
_lowercase : Tuple = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_lowercase : Optional[Any] = 'lower newer'
_lowercase : Any = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowercase : Optional[int] = tokenizer.tokenize(UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowercase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) ,UpperCamelCase )
def _lowerCamelCase ( self : Any ) -> Union[str, Any]:
_lowercase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=UpperCamelCase ) ,[0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=UpperCamelCase ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,)
@slow
def _lowerCamelCase ( self : Any ) -> Any:
_lowercase : Union[str, Any] = self.tokenizer_class.from_pretrained('roberta-base' )
_lowercase : List[str] = tokenizer.encode('sequence builders' ,add_special_tokens=UpperCamelCase )
_lowercase : Any = tokenizer.encode('multi-sequence build' ,add_special_tokens=UpperCamelCase )
_lowercase : Optional[Any] = tokenizer.encode(
'sequence builders' ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : List[Any] = tokenizer.encode(
'sequence builders' ,'multi-sequence build' ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_lowercase : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ,UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self : int ) -> str:
_lowercase : Any = self.get_tokenizer()
_lowercase : Optional[Any] = 'Encode this sequence.'
_lowercase : Union[str, Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_lowercase : Dict = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : List[str] = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase ,UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase ,UpperCamelCase )
# Testing spaces after special tokens
_lowercase : str = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase )} ) # mask token has a left space
_lowercase : int = tokenizer.convert_tokens_to_ids(UpperCamelCase )
_lowercase : Any = 'Encode <mask> sequence'
_lowercase : Dict = 'Encode <mask>sequence'
_lowercase : int = tokenizer.encode(UpperCamelCase )
_lowercase : Optional[int] = encoded.index(UpperCamelCase )
_lowercase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : Dict = tokenizer.encode(UpperCamelCase )
_lowercase : Optional[Any] = encoded.index(UpperCamelCase )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase ,UpperCamelCase )
def _lowerCamelCase ( self : int ) -> Optional[Any]:
pass
def _lowerCamelCase ( self : Tuple ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
_lowercase : Any = self.tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
_lowercase : Any = 'A, <mask> AllenNLP sentence.'
_lowercase : Optional[int] = tokenizer_r.encode_plus(UpperCamelCase ,add_special_tokens=UpperCamelCase ,return_token_type_ids=UpperCamelCase )
_lowercase : Any = tokenizer_p.encode_plus(UpperCamelCase ,add_special_tokens=UpperCamelCase ,return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
_lowercase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowercase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _lowerCamelCase ( self : Tuple ) -> List[str]:
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
_lowercase : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowercase : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] ,UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] ,UpperCamelCase )
def _lowerCamelCase ( self : List[Any] ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : List[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_lowercase : Optional[int] = F'''{text_of_1_token} {text_of_1_token}'''
_lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Dict = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Optional[int] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Optional[int] = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : List[str] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,) | 125 | 1 |
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return int(input_a == input_a == 0 )
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 ,0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 ,1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 ,0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 ,1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 719 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 656 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Union[str, Any] = "gpt_bigcode"
A__ : Any = ["past_key_values"]
A__ : Dict = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , SCREAMING_SNAKE_CASE__=50257 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="gelu_pytorch_tanh" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1e-5 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=50256 , SCREAMING_SNAKE_CASE__=50256 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> Optional[int]:
A__ = vocab_size
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = n_inner
A__ = activation_function
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = scale_attn_weights
A__ = use_cache
A__ = attention_softmax_in_fpaa
A__ = scale_attention_softmax_in_fpaa
A__ = multi_query
A__ = bos_token_id
A__ = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 104 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
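
# A minimal, illustrative launch sketch (hedged): the script filename below is an
# assumption, while `--num_folds`, `--mixed_precision` and `--cpu` mirror the `args`
# attributes this script actually reads further down.
#
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16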
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Builds train, validation and test `DataLoader`s for one cross-validation fold:
    the train/validation loaders come from the given index arrays into the original
    "train" split, while the test loader reuses the original "validation" split.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, batch_size
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
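# Illustrative launch commands (assuming this script is saved as cross_validation.py;
# exact flags depend on your `accelerate config`):
#   python cross_validation.py --num_folds 5
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16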
| 704 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
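# Quick sanity sketch (illustrative, not part of the module): pure-comment lines
# are stripped before hashing, so they don't invalidate the cache.
#   >>> _hash_python_lines(["# only a comment", "x = 1"]) == _hash_python_lines(["x = 1"])
#   True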
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULES_SUPPORTING_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
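# Illustration only: a hypothetical helper (not part of this module) showing how
# the tables above can drive builder-module inference from a file name.
def _infer_module_for_filename(filename: str):
    for ext, (module_name, builder_kwargs) in _EXTENSION_TO_MODULE.items():
        if filename.endswith(ext):
            return module_name, builder_kwargs
    return None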
| 408 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
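# Illustrative usage (assuming this file is transformers/models/jukebox/__init__.py):
# the sys.modules swap above makes every import lazy, so the torch-heavy modeling
# module is only imported when one of its attributes is first accessed, e.g.:
#   >>> from transformers.models.jukebox import JukeboxConfig  # cheap, config only
#   >>> config = JukeboxConfig()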
| 506 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
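# For reference, the byte-level BPE round-trip exercised above looks like this
# (illustrative; requires downloading the checkpoint):
#   >>> tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   >>> ids = tok.encode("def hello():")
#   >>> tok.decode(ids)
#   'def hello():'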
| 506 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
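# Example invocation (illustrative; file and checkpoint names are placeholders):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema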
| 714 |
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 379 | 0 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
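# Note: the canonical Fisher-Yates algorithm walks the list once, swapping each
# position with a uniformly chosen index at or before it, which guarantees a
# uniform permutation. A minimal sketch for comparison:
#
#   def fisher_yates_canonical(data: list) -> list:
#       for i in range(len(data) - 1, 0, -1):
#           j = random.randint(0, i)
#           data[i], data[j] = data[j], data[i]
#       return data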
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 29 |
def binary_xor(a: int, b: int) -> str:
    """
    >>> binary_xor(25, 32)
    '0b111001'
    >>> binary_xor(37, 50)
    '0b010111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
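# Illustrative instantiation (defaults as defined above):
#   >>> config = NllbMoeConfig(num_experts=64, moe_token_dropout=0.1)
#   >>> config.router_dtype
#   'float32'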
| 627 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n, e.g. unique_prime_factors(24) == {2, 3}."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of an iterable are equal (an empty iterable counts as equal)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
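# Worked example: with n=2 the first pair of consecutive integers having two
# distinct prime factors each is (14, 15), since 14 = 2 * 7 and 15 = 3 * 5,
# so run(2) returns [14, 15] and solution(2) == 14.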
if __name__ == "__main__":
print(solution())
| 76 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
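# Typical usage (illustrative):
#   measures = start_measure()
#   ...  # run the workload to benchmark
#   measures = end_measure(measures)
#   log_measures(measures, "my workload")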
| 76 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
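# Note (illustrative): combined with the availability guards above, this lazy module
# keeps `import transformers` cheap; a missing optional dependency (torch or flax)
# only raises when the corresponding class is actually accessed.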
| 221 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the Hugging Face CLIP processor so that gradients can flow back through the
    image preprocessing step (the stock processor converts to PIL images, which breaks
    gradient flow).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        # assumption: pixel values are stored under the usual CLIP processor key
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and decode the resulting image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
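# Illustrative usage (paths are placeholders; requires local VQGAN weights and the
# helper modules imported at the top of this file):
#   editor = VQGAN_CLIP(vqgan_config="./model.yaml", vqgan_checkpoint="./last.ckpt")
#   editor.generate(pos_prompts="a smiling face:1.0", image_path="./face.png", save_intermediate=True)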
| 221 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        # assumption: codebook weights live under the image_codebook prefix
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
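# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava.pt --codebook_path ./dalle_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full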
| 232 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
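# Illustrative export-side usage:
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config)
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask'] since type_vocab_size == 0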
| 364 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # split the fused qkv projection into separate query/key/value weights
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original AST weights into our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
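
    # Hedged usage sketch: a typical shell invocation of this script
    # (the script filename is an assumption):
    #   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted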
| 15 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
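

# Hedged usage sketch (the checkpoint name is illustrative and fetching it
# requires network access):
# from transformers import pipeline
# classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
# print(classifier("path/to/image.jpg", top_k=3))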
| 15 | 1 |
"""
Fast polynomial multiplication via the radix-2 Cooley-Tukey FFT.
"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B, then invert to obtain A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
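
    # Hedged usage sketch: multiply A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x;
    # the printed product corresponds to 4 + 13x + 22x^2 + 15x^3.
    print(FFT(poly_a=[1, 2, 3], poly_b=[4, 5]))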
| 69 |
def binary_and(a: int, b: int) -> str:
    """
    Take in two positive integers and return a binary string representing the
    bitwise AND of their binary representations.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
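

# Hedged self-check: 25 = 0b11001 and 32 = 0b100000 share no set bits.
assert binary_and(25, 32) == "0b000000"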
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
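

# Hedged usage sketch: the script is normally launched from the command line, e.g.
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --output_dir /tmp/xnli_output
# (the flags mirror the dataclass fields parsed by HfArgumentParser above).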
if __name__ == "__main__":
    main()
| 101 |
"""simple docstring"""
from math import isqrt, loga
def __A ( a_ :int) -> list[int]:
__a : int = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , a_ , a_):
__a : int = False
return [i for i in range(2 , a_) if is_prime[i]]
def __A ( a_ :int = 80_08_00 , a_ :int = 80_08_00) -> int:
__a : str = degree * loga(a_)
__a : Tuple = int(a_)
__a : int = calculate_prime_numbers(a_)
__a : List[Any] = 0
__a : Optional[Any] = 0
__a : Dict = len(a_) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left])
+ prime_numbers[left] * loga(prime_numbers[right])
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
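

# Hedged sanity check: with a tiny bound the pairs can be enumerated by hand;
# solution(base=8, degree=8) counts prime pairs p < q with
# q*log2(p) + p*log2(q) <= 24.
# print(solution(8, 8))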
if __name__ == "__main__":
    print(f"{solution() = }")
| 101 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """
    Unconditional image generation with a VQ-VAE decoder and a DDIM scheduler.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
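

# Hedged usage sketch ("CompVis/ldm-celebahq-256" is a known unconditional latent
# diffusion checkpoint; downloading it needs network access):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]
# image.save("ldm_generated.png")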
| 635 |
from __future__ import annotations
import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """
    Class InstagramUser crawls instagram user information
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 635 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """
    Text-to-image pipeline that keeps results from the same seed similar across
    different output sizes.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
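

# Hedged usage sketch: load as a diffusers community pipeline (the identifier
# "seed_resize_stable_diffusion" and the checkpoint name are assumptions):
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
# )
# image = pipe("a photo of an astronaut riding a horse", height=512, width=768).images[0]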
| 669 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the complete diamond pattern of "*"
def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
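

# Hedged example: pretty_print(3) prints (roughly, each star is followed by a space)
#   *
#  * *
# * * *
# * * *
#  * *
#   *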
if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")

    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 669 | 1 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the system a1*x + b1*y = c1 and a2*x + b2*y = c2 with Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
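

# Hedged example: x + 2y = 3 and 2x + y = 3 intersect at (1.0, 1.0).
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)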
| 457 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs a TVLT audio feature extractor that prepares log-mel spectrogram
    patches (plus an attention mask) for the model.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
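

# Hedged usage sketch: featurize one second of silence (shapes depend on the
# patching parameters; nothing here is asserted):
# extractor = TvltFeatureExtractor()
# batch = extractor(np.zeros(44100, dtype=np.float32), sampling_rate=44100, return_tensors="np")
# print(batch["audio_values"].shape, batch["audio_mask"].shape)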
| 457 | 1 |
""" Testing suite for the PyTorch MobileNetV2 model. """
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class _a :
"""simple docstring"""
def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[str]=13 , lowercase_ : int=3 , lowercase_ : Optional[Any]=32 , lowercase_ : Optional[int]=0.2_5 , lowercase_ : str=8 , lowercase_ : str=8 , lowercase_ : Tuple=6 , lowercase_ : str=32 , lowercase_ : Optional[Any]=True , lowercase_ : Any=True , lowercase_ : Tuple=True , lowercase_ : str="relu6" , lowercase_ : Any=1_280 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=0.0_2 , lowercase_ : str=True , lowercase_ : Optional[Any]=True , lowercase_ : str=10 , lowercase_ : Union[str, Any]=None , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = depth_multiplier
lowercase_ = depth_divisible_by
lowercase_ = min_depth
lowercase_ = expand_ratio
lowercase_ = tf_padding
lowercase_ = output_stride
lowercase_ = first_layer_is_expansion
lowercase_ = finegrained_output
lowercase_ = hidden_act
lowercase_ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase_ = classifier_dropout_prob
lowercase_ = use_labels
lowercase_ = is_training
lowercase_ = num_labels
lowercase_ = initializer_range
lowercase_ = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
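
# (Worked numbers for the shape assertions above, an illustrative note that is
# not part of the original test file: with the tester defaults, image_size 32
# and output_stride 32 give 32 // 32 = 1, so the backbone's last hidden state
# is (batch_size, 1280, 1, 1) before pooling.)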
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
                [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
                [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 703 | '''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
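
    # (In create_and_check_mpnet_for_multiple_choice above, unsqueeze(1).expand(...)
    # tiles each (batch, seq_len) tensor into (batch, num_choices, seq_len) so every
    # choice shares the same encoder input; contiguous() is needed because expand
    # returns a non-contiguous view. Illustrative note, not part of the original file.)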
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 603 | 0 |
def kth_permutation(k: int, n: int) -> list:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
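
# Quick sanity checks (added for illustration, not from the original file; the
# values follow from the factorial number system): rank 0 is the identity order
# and rank 3! - 1 = 5 is the full reversal for n = 3.
assert kth_permutation(0, 3) == [0, 1, 2]
assert kth_permutation(5, 3) == [2, 1, 0]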
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # While processes are not completed:
    # a process whose arrival time has passed and that still has remaining
    # execution time is put into ready_process; the shortest process in
    # ready_process, target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
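
# Worked example (added for illustration; it mirrors the demo below): with all
# arrivals at 0 and bursts [2, 5, 3, 7], non-preemptive SJF runs the jobs
# shortest-first, so completion times are 2, 5, 10 and 17, and waiting time is
# completion - arrival - burst.
assert calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10]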
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 465 | 0 |
"""simple docstring"""
def solution(n: int = 1_00) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first n natural numbers, using the closed-form Gauss formulas."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
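
# Spot check (added for illustration, using the closed forms above): for n = 10
# the square of the sum is 55**2 = 3025, the sum of the squares is 385, and the
# difference is 2640 -- the worked figure from Project Euler problem 6.
assert solution(10) == 2640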
if __name__ == "__main__":
print(f'{solution() = }')
| 393 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # predict() returns a sigmoid probability in [0, 1], so threshold it rather
    # than comparing against exact 0 or 1.
    if result[0][0] > 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
| 393 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
"""simple docstring"""
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
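
if __name__ == "__main__":
    # Minimal usage sketch (an illustrative addition, not part of the original
    # module): PipelineTool instances are callable, and the checkpoint downloads
    # are large, so this is kept behind a __main__ guard.
    tool = TextToSpeechTool()
    audio = tool("Hello, this is a test of the text reader tool.")
    print(audio.shape)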
| 469 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
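
# Spot checks (added for illustration; the values follow from positional
# notation): 255 in base 16 is "FF" and 7 in base 2 is "111".
assert decimal_to_any(255, 16) == "FF"
assert decimal_to_any(7, 2) == "111"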
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 469 | 1 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
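
# Spot checks (added for illustration): 2**7 - 1 = 127 is a Mersenne prime,
# while 2**11 - 1 = 2047 = 23 * 89 is composite.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False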
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 713 |
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its extremities to chosen_vertices, and
    # then remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
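
# Quick property check (added for illustration; the edge popped from the set is
# arbitrary, so we assert the cover property rather than an exact vertex set):
sample_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(sample_graph)
assert all(u in cover or v in cover for u, v in get_edges(sample_graph))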
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 369 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig(PretrainedConfig):
"""simple docstring"""
a_ = """bertabs"""
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=3_0_5_2_2 , lowerCAmelCase_ : List[Any]=5_1_2 , lowerCAmelCase_ : str=6 , lowerCAmelCase_ : Union[str, Any]=5_1_2 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : List[str]=5_1_2 , lowerCAmelCase_ : Tuple=0.2 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : Dict=7_6_8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Optional[Any]=2_0_4_8 , lowerCAmelCase_ : int=0.2 , **lowerCAmelCase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_pos
__lowerCAmelCase = enc_layers
__lowerCAmelCase = enc_hidden_size
__lowerCAmelCase = enc_heads
__lowerCAmelCase = enc_ff_size
__lowerCAmelCase = enc_dropout
__lowerCAmelCase = dec_layers
__lowerCAmelCase = dec_hidden_size
__lowerCAmelCase = dec_heads
__lowerCAmelCase = dec_ff_size
__lowerCAmelCase = dec_dropout
| 53 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
'''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0_09_79)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.02)) < 1E-5
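
    # (The expected values above follow from the DDPM posterior variance for the
    # "fixed_small" setting, beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
    # evaluated at t = 0, 487 and 999 on the linear beta schedule. Illustrative
    # note, not part of the original test.)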
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 2_58.96_06) < 1E-2
        assert abs(result_mean.item() - 0.33_72) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 2_02.02_96) < 1E-2
        assert abs(result_mean.item() - 0.26_31) < 1E-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [1_0_0, 8_7, 5_0, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps) | 578 | 0 |
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
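
# Usage sketch (an illustrative addition): with the checker installed above, a
# doctest can opt out of output comparison entirely, e.g.
#
#     >>> print("some nondeterministic value")  # doctest: +IGNORE_RESULT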
| 703 | '''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
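
# (rougeLsum computes a summary-level LCS over sentences, which the scorer
# detects via newline separators; that is why newline_sep changes rougeLsum in
# the tests above while rouge1/rouge2/rougeL are unaffected. Illustrative note,
# not part of the original test file.)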
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 30 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class TaConfig(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=3_21_28,
        d_model=5_12,
        d_kv=64,
        d_ff=20_48,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=1_28,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
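
if __name__ == "__main__":
    # Minimal usage sketch (an illustrative addition, not part of the original
    # configuration module): build a small config and read the activation
    # fields derived in __init__ above.
    config = TaConfig(d_model=64, num_layers=2, num_heads=4, feed_forward_proj="gated-gelu")
    print(config.dense_act_fn, config.is_gated_act)  # -> gelu_new True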
| 298 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.intaa)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.intaa)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-1_0_0, dtype=np.intaa)
        mc_labels = np.zeros((n_batch,), dtype=np.intaa)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
snake_case__ : Any = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=A , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=A , type=A , required=A , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=A , default='' )
parser.add_argument('--eval_dataset' , type=A , default='' )
parser.add_argument('--seed' , type=A , default=4_2 )
parser.add_argument('--num_train_epochs' , type=A , default=3 )
parser.add_argument('--train_batch_size' , type=A , default=8 )
parser.add_argument('--eval_batch_size' , type=A , default=1_6 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=A , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=A , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=A , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=A , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=A , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=A , default=0.01 )
parser.add_argument('--lm_coef' , type=A , default=0.9 )
parser.add_argument('--n_valid' , type=A , default=3_7_4 )
parser.add_argument('--server_ip' , type=A , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=A , default='' , help='Can be used for distant debugging.' )
snake_case__ : Optional[int] = parser.parse_args()
print(A )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
snake_case__ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
snake_case__ : int = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(A , A ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
snake_case__ : Tuple = ['_start_', '_delimiter_', '_classify_']
snake_case__ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(A )
snake_case__ : Tuple = tokenizer.convert_tokens_to_ids(A )
snake_case__ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(A ) )
model.to(A )
# Load and encode the datasets
def tokenize_and_encode(A ):
if isinstance(A , A ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A ) )
elif isinstance(A , A ):
return obj
return [tokenize_and_encode(A ) for o in obj]
logger.info('Encoding dataset...' )
snake_case__ : str = load_rocstories_dataset(args.train_dataset )
snake_case__ : List[str] = load_rocstories_dataset(args.eval_dataset )
snake_case__ : Optional[Any] = (train_dataset, eval_dataset)
snake_case__ : Any = tokenize_and_encode(A )
# Compute the max input length for the Transformer
snake_case__ : Any = model.config.n_positions // 2 - 2
snake_case__ : List[Any] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
snake_case__ : int = min(A , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case__ : List[Any] = pre_process_datasets(A , A , A , *A )
snake_case__ , snake_case__ : Optional[Any] = tensor_datasets[0], tensor_datasets[1]
snake_case__ : Tuple = TensorDataset(*A )
snake_case__ : List[str] = RandomSampler(A )
snake_case__ : int = DataLoader(A , sampler=A , batch_size=args.train_batch_size )
snake_case__ : str = TensorDataset(*A )
snake_case__ : Dict = SequentialSampler(A )
snake_case__ : List[str] = DataLoader(A , sampler=A , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case__ : Union[str, Any] = args.max_steps
snake_case__ : Dict = args.max_steps // (len(A ) // args.gradient_accumulation_steps) + 1
else:
snake_case__ : int = len(A ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case__ : Tuple = list(model.named_parameters() )
snake_case__ : List[Any] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
snake_case__ : Dict = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
snake_case__ : int = AdamW(A , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case__ : Tuple = get_linear_schedule_with_warmup(
A , num_warmup_steps=args.warmup_steps , num_training_steps=A )
if args.do_train:
snake_case__ , snake_case__ , snake_case__ : Tuple = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
snake_case__ : str = 0
snake_case__ : Dict = 0
snake_case__ : Dict = tqdm(A , desc='Training' )
for step, batch in enumerate(A ):
snake_case__ : List[str] = tuple(t.to(A ) for t in batch )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = batch
snake_case__ : int = model(A , mc_token_ids=A , lm_labels=A , mc_labels=A )
snake_case__ : Tuple = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case__ : Union[str, Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case__ : Optional[int] = 'Training loss: {:.2e} lr: {:.2e}'.format(A , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case__ : List[Any] = model.module if hasattr(A , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case__ : Union[str, Any] = os.path.join(args.output_dir , A )
snake_case__ : List[str] = os.path.join(args.output_dir , A )
torch.save(model_to_save.state_dict() , A )
model_to_save.config.to_json_file(A )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case__ : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case__ : Optional[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(A )
if args.do_eval:
model.eval()
snake_case__ , snake_case__ : int = 0, 0
snake_case__ , snake_case__ : List[Any] = 0, 0
for batch in tqdm(A , desc='Evaluating' ):
snake_case__ : Tuple = tuple(t.to(A ) for t in batch )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = batch
with torch.no_grad():
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = model(
A , mc_token_ids=A , lm_labels=A , mc_labels=A )
snake_case__ : Union[str, Any] = mc_logits.detach().cpu().numpy()
snake_case__ : List[Any] = mc_labels.to('cpu' ).numpy()
snake_case__ : Optional[int] = accuracy(A , A )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case__ : str = eval_loss / nb_eval_steps
snake_case__ : Any = eval_accuracy / nb_eval_examples
snake_case__ : Tuple = tr_loss / nb_tr_steps if args.do_train else None
snake_case__ : Optional[int] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
snake_case__ : Optional[int] = os.path.join(args.output_dir , 'eval_results.txt' )
with open(A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , A , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 170 | 0 |
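# Aside on the running-loss display in the training loop above: the reported value is an
# exponential moving average that blends the previous estimate (weight 0.7) with the new
# batch loss (weight 0.3). Minimal self-contained sketch; `update_ema` is our own name.
def update_ema(ema, value, decay=0.7):
    return value if ema is None else decay * ema + (1 - decay) * value

ema = None
for batch_loss in [2.0, 1.5, 1.0]:
    ema = update_ema(ema, batch_loss)
    print(f"smoothed loss: {ema:.3f}")  # 2.000, 1.850, 1.595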
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 5_12,
'''YituTech/conv-bert-medium-small''': 5_12,
'''YituTech/conv-bert-small''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer when the requested options differ from the serialized state.
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        # [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        # Segment ids are 0 for the first sequence (and its special tokens), 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 702 |
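# Worked illustration of the special-token layout produced by the two tokenizer helpers
# above for a sequence pair; the token ids are made up, 101/102 stand in for [CLS]/[SEP].
cls_id, sep_id = 101, 102
seq_a, seq_b = [7, 8, 9], [4, 5]
input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
assert len(input_ids) == len(token_type_ids)
print(input_ids)        # [101, 7, 8, 9, 102, 4, 5, 102]
print(token_type_ids)   # [0, 0, 0, 0, 0, 1, 1, 1]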
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class Pix2StructTextConfig( PretrainedConfig ):
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
def __init__( self , snake_case_=5_02_44 , snake_case_=7_68 , snake_case_=64 , snake_case_=20_48 , snake_case_=12 , snake_case_=12 , snake_case_=32 , snake_case_=1_28 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=1.0 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=False , snake_case_=0 , snake_case_=1 , snake_case_=False , snake_case_=True , **snake_case_ , ):
lowercase =vocab_size
lowercase =hidden_size
lowercase =d_kv
lowercase =d_ff
lowercase =num_layers
lowercase =num_heads
lowercase =relative_attention_num_buckets
lowercase =relative_attention_max_distance
lowercase =dropout_rate
lowercase =layer_norm_epsilon
lowercase =initializer_factor
lowercase =use_cache
lowercase =eos_token_id
lowercase =decoder_start_token_id
# for backwards compatibility
lowercase =dense_act_fn
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , tie_word_embeddings=snake_case_ , is_decoder=snake_case_ , **snake_case_ , )
@classmethod
def _A( cls , snake_case_ , **snake_case_ ):
cls._set_token_in_kwargs(snake_case_ )
lowercase , lowercase =cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class Pix2StructVisionConfig( PretrainedConfig ):
    model_type = 'pix2struct_vision_model'
def __init__( self , snake_case_=7_68 , snake_case_=7_68 , snake_case_=20_48 , snake_case_=64 , snake_case_=12 , snake_case_=12 , snake_case_="gelu_new" , snake_case_=1E-6 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=1E-10 , snake_case_=1.0 , snake_case_=40_96 , snake_case_=32 , snake_case_=1_28 , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =hidden_size
lowercase =patch_embed_hidden_size
lowercase =d_ff
lowercase =dropout_rate
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =initializer_range
lowercase =initializer_factor
lowercase =attention_dropout
lowercase =layer_norm_eps
lowercase =dense_act_fn
lowercase =seq_len
lowercase =relative_attention_num_buckets
lowercase =relative_attention_max_distance
lowercase =d_kv
@classmethod
def _A( cls , snake_case_ , **snake_case_ ):
cls._set_token_in_kwargs(snake_case_ )
lowercase , lowercase =cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class Pix2StructConfig( PretrainedConfig ):
    model_type = 'pix2struct'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = Pix2StructTextConfig(**text_config )
        self.vision_config = Pix2StructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 145 | 0 |
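# Hedged usage sketch of the composite-config pattern above, written against the public
# transformers class names it mirrors; the tiny dimensions are arbitrary test values.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(hidden_size=64, num_layers=2, num_heads=2, d_kv=32, d_ff=128)
vision_cfg = Pix2StructVisionConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(config.decoder_start_token_id, config.pad_token_id)  # copied from the text config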
'''simple docstring'''
import unittest
import numpy as np
def schur_complement ( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
    """simple docstring"""
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement( unittest.TestCase ):
    '''simple docstring'''
    def test_schur_complement( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(b , a , c )
    def test_improper_b_c_dimensions( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 433 |
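# Standalone numeric check of the determinant identity exercised by the tests above:
# for M = [[A, B], [B^T, C]] with S = C - B^T A^{-1} B, det(M) = det(A) * det(S).
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])
s = c - b.T @ np.linalg.inv(a) @ b
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))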
'''simple docstring'''
import re
def is_sri_lankan_phone_number ( phone : str ) -> bool:
    """simple docstring"""
    pattern = re.compile(
        R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 433 | 1 |
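# A few illustrative inputs for the validator above; the numbers are made up and only
# meant to exercise the accepted prefixes (0, 94, +94, 0094) and the 7x carrier codes.
for number in ["0094702343221", "+94771234567", "075-1234567", "1234567890"]:
    print(number, is_sri_lankan_phone_number(number))
# True, True, True, False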
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False, False, False
@dataclass
class Audio:
    sampling_rate : Optional[int] = None
    mono : bool = True
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    _type : str = field(default="""Audio""" , init=False , repr=False )
def __call__( self ):
return self.pa_type
def _lowerCamelCase ( self , __lowerCAmelCase ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCamelCase__ = BytesIO()
sf.write(__lowerCAmelCase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    UpperCamelCase__ = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    UpperCamelCase__ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32767
UpperCamelCase__ = BytesIO(bytes() )
sf.write(__lowerCAmelCase , __lowerCAmelCase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
UpperCamelCase__ , UpperCamelCase__ = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
UpperCamelCase__ = xsplitext(__lowerCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
UpperCamelCase__ = token_per_repo_id or {}
UpperCamelCase__ = path.split("""::""" )[-1]
try:
UpperCamelCase__ = string_to_dict(__lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
UpperCamelCase__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCamelCase__ = None
with xopen(__lowerCAmelCase , """rb""" , use_auth_token=__lowerCAmelCase ) as f:
UpperCamelCase__ , UpperCamelCase__ = sf.read(__lowerCAmelCase )
else:
UpperCamelCase__ , UpperCamelCase__ = sf.read(__lowerCAmelCase )
UpperCamelCase__ = array.T
if self.mono:
UpperCamelCase__ = librosa.to_mono(__lowerCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCamelCase__ = librosa.resample(__lowerCAmelCase , orig_sr=__lowerCAmelCase , target_sr=self.sampling_rate )
UpperCamelCase__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowerCamelCase ( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _lowerCamelCase ( self , __lowerCAmelCase ):
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
UpperCamelCase__ = pa.array([Audio().encode_example(__lowerCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
UpperCamelCase__ = storage.field("""bytes""" )
else:
UpperCamelCase__ = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
UpperCamelCase__ = storage.field("""path""" )
else:
UpperCamelCase__ = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
def _lowerCamelCase ( self , __lowerCAmelCase ):
@no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCamelCase__ = pa.array(
[os.path.basename(__lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
| 709 |
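# Hedged round-trip sketch for the Audio feature above, using the public datasets API it
# mirrors; requires soundfile for encoding and librosa/soundfile for decoding.
import numpy as np
from datasets import Audio

feature = Audio(sampling_rate=16_000)
encoded = feature.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
print(sorted(encoded))  # ['bytes', 'path'] -- the wav bytes live under 'bytes'
decoded = feature.decode_example(encoded)
print(decoded["sampling_rate"], decoded["array"].shape)  # 16000 (16000,)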
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case : Any = MODEL_FOR_MASKED_LM_MAPPING
snake_case : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def _lowerCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25506, """token_str""": """ accuser"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25506,
"""token_str""": """ accuser""",
},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16416, """token_str""": """ELS"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16416, """token_str""": """ELS"""},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13606, """token_str""": """ Clara"""},
] , )
UpperCamelCase__ = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
UpperCamelCase__ = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__lowerCAmelCase )
@slow
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12790,
"""token_str""": """ Lyon""",
},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(__lowerCAmelCase , [] )
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(__lowerCAmelCase , [] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = fill_masker.tokenizer
UpperCamelCase__ = fill_masker.model
UpperCamelCase__ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__lowerCAmelCase , [
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
] , )
with self.assertRaises(__lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__lowerCAmelCase ):
fill_masker("""This is""" )
self.run_test_top_k(__lowerCAmelCase , __lowerCAmelCase )
self.run_test_targets(__lowerCAmelCase , __lowerCAmelCase )
self.run_test_top_k_targets(__lowerCAmelCase , __lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(__lowerCAmelCase , __lowerCAmelCase )
self.fill_mask_with_multiple_masks(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tokenizer.get_vocab()
UpperCamelCase__ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , targets=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase ) )
# Call argument
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase ) )
# Score equivalence
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
UpperCamelCase__ = [top_mask["""token_str"""] for top_mask in outputs]
UpperCamelCase__ = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowerCAmelCase ) == set(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
UpperCamelCase__ = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , top_k=2 )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tokenizer.get_vocab()
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCamelCase__ = [el["""token_str"""] for el in sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x["score"] , reverse=__lowerCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowerCAmelCase ).issubset(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase__ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=__lowerCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__lowerCAmelCase ) , 3 )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
] , )
| 548 | 0 |
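# Minimal standalone use of the pipeline exercised by the tests above; the tiny checkpoint
# is the same one the test class loads, so scores are meaningless but the output shape is real.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for prediction in unmasker("My name is <mask>"):
    print(prediction["token_str"], prediction["score"])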
'''simple docstring'''
from math import isclose, sqrt
def next_point ( point_x : float , point_y : float , incoming_gradient : float ) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution ( first_x_coord : float = 1.4 , first_y_coord : float = -9.6 ) -> int:
    """simple docstring"""
    num_reflections : int = 0
    point_x : float = first_x_coord
    point_y : float = first_y_coord
    gradient : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"{solution() = }") | 436 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : Optional[Any] = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ : Tuple = model_name.find("patch" )
__magic_name__ : Dict = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
__magic_name__ : Any = XCLIPVisionConfig(patch_size=UpperCamelCase__ , num_frames=UpperCamelCase__ )
if "large" in model_name:
__magic_name__ : List[str] = 768
__magic_name__ : Optional[Any] = 3072
__magic_name__ : str = 12
__magic_name__ : Optional[int] = 1024
__magic_name__ : int = 4096
__magic_name__ : Optional[Any] = 16
__magic_name__ : Union[str, Any] = 24
__magic_name__ : Union[str, Any] = 768
__magic_name__ : List[str] = 3072
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Optional[int] = 336
__magic_name__ : Optional[Any] = XCLIPConfig.from_text_vision_configs(UpperCamelCase__ , UpperCamelCase__ )
if "large" in model_name:
__magic_name__ : Any = 768
return config
def _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ : Any = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
__magic_name__ : Union[str, Any] = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
__magic_name__ : Optional[Any] = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
__magic_name__ : Dict = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
__magic_name__ : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
__magic_name__ : Optional[int] = name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
__magic_name__ : List[str] = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ : Tuple = name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
__magic_name__ : str = name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ : Any = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
__magic_name__ : str = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
__magic_name__ : Any = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
__magic_name__ : int = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
__magic_name__ : Optional[int] = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
__magic_name__ : Tuple = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
__magic_name__ : Optional[Any] = name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
__magic_name__ : List[str] = name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ : int = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
__magic_name__ : Tuple = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
__magic_name__ : Union[str, Any] = name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
__magic_name__ : Dict = name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
__magic_name__ : Union[str, Any] = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ : str = orig_state_dict.pop(UpperCamelCase__ )
if "attn.in_proj" in key:
__magic_name__ : List[str] = key.split("." )
if key.startswith("visual" ):
__magic_name__ : int = key_split[3]
__magic_name__ : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ : Union[str, Any] = val[
:dim, :
]
__magic_name__ : Optional[Any] = val[
dim : dim * 2, :
]
__magic_name__ : Optional[Any] = val[
-dim:, :
]
else:
__magic_name__ : Optional[int] = val[
:dim
]
__magic_name__ : Union[str, Any] = val[
dim : dim * 2
]
__magic_name__ : Optional[Any] = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ : Tuple = val[
:dim, :
]
__magic_name__ : Any = val[
dim : dim * 2, :
]
__magic_name__ : List[str] = val[
-dim:, :
]
else:
__magic_name__ : Optional[int] = val[:dim]
__magic_name__ : str = val[
dim : dim * 2
]
__magic_name__ : List[str] = val[-dim:]
elif key.startswith("mit" ):
__magic_name__ : Any = key_split[2]
__magic_name__ : Tuple = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ : str = val[:dim, :]
__magic_name__ : str = val[dim : dim * 2, :]
__magic_name__ : Union[str, Any] = val[-dim:, :]
else:
__magic_name__ : Optional[Any] = val[:dim]
__magic_name__ : str = val[dim : dim * 2]
__magic_name__ : Tuple = val[-dim:]
else:
__magic_name__ : Union[str, Any] = key_split[2]
__magic_name__ : List[str] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ : Optional[Any] = val[:dim, :]
__magic_name__ : Optional[int] = val[
dim : dim * 2, :
]
__magic_name__ : Optional[Any] = val[-dim:, :]
else:
__magic_name__ : int = val[:dim]
__magic_name__ : List[Any] = val[
dim : dim * 2
]
__magic_name__ : Union[str, Any] = val[-dim:]
else:
__magic_name__ : Tuple = rename_key(UpperCamelCase__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ : Tuple = val.T
__magic_name__ : int = val
return orig_state_dict
def _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ : Any = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
__magic_name__ : Tuple = "eating_spaghetti.npy"
elif num_frames == 32:
__magic_name__ : int = "eating_spaghetti_32_frames.npy"
__magic_name__ : str = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=UpperCamelCase__ , repo_type="dataset" , )
__magic_name__ : Dict = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ):
"""simple docstring"""
__magic_name__ : List[str] = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
__magic_name__ : List[Any] = model_to_url[model_name]
__magic_name__ : Any = 8
if "16-frames" in model_name:
__magic_name__ : Optional[int] = 16
elif "shot" in model_name:
__magic_name__ : Optional[int] = 32
__magic_name__ : List[str] = get_xclip_config(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ : List[str] = XCLIPModel(UpperCamelCase__ )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ : Dict = "pytorch_model.bin"
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
__magic_name__ : List[str] = torch.load(UpperCamelCase__ , map_location="cpu" )["model"]
else:
__magic_name__ : List[str] = torch.hub.load_state_dict_from_url(UpperCamelCase__ )["model"]
__magic_name__ : Any = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ : Optional[Any] = XCLIPModel(UpperCamelCase__ )
__magic_name__ , __magic_name__ : Optional[Any] = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ : List[str] = 336 if model_name == "xclip-large-patch14-16-frames" else 224
__magic_name__ : Optional[Any] = VideoMAEImageProcessor(size=UpperCamelCase__ )
__magic_name__ : int = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
__magic_name__ : Tuple = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
__magic_name__ : Dict = XCLIPProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
__magic_name__ : List[Any] = prepare_video(UpperCamelCase__ )
__magic_name__ : Union[str, Any] = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=UpperCamelCase__ , return_tensors="pt" , padding=UpperCamelCase__ )
print("Shape of pixel values:" , inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ : List[str] = model(**UpperCamelCase__ )
# Verify outputs
__magic_name__ : Optional[Any] = outputs.logits_per_video
__magic_name__ : int = logits_per_video.softmax(dim=1 )
print("Probs:" , UpperCamelCase__ )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ : Any = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ : List[str] = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ : Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ : int = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Any = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ : Tuple = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ : List[str] = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ : Tuple = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ : Tuple = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ : List[str] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ : List[str] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ : Tuple = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ : Dict = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ : Dict = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ : List[str] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(UpperCamelCase__ , organization="nielsr" )
processor.push_to_hub(UpperCamelCase__ , organization="nielsr" )
slow_tokenizer.push_to_hub(UpperCamelCase__ , organization="nielsr" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 436 | 1 |
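# Equivalent command-line invocation of the conversion script above; the script filename
# and output path are placeholders, while the flags are those defined by the argparse block:
#   python <this_script>.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32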
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
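# A minimal usage sketch for the classes above (illustrative only; the MRPC
# data_dir is a placeholder):
#
#     from transformers import AutoTokenizer
#
#     data_args = GlueDataTrainingArguments(
#         task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128
#     )
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     print(len(train_dataset), train_dataset[0])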
| 716 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 347 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
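# This script is meant to be started through the Accelerate launcher, e.g.
# (a sketch, assuming the file is saved as gradient_accumulation.py):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4
#
# Inside `accelerator.accumulate(model)`, gradient synchronization and the
# optimizer/scheduler steps only actually fire every
# `gradient_accumulation_steps` batches, so the effective batch size per
# process is batch_size * gradient_accumulation_steps.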
| 424 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """
    Least Recently Used cache backed by a deque (for ordering) and a set
    (for O(1) membership checks).
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys. The cache is given size n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Looks for the key in the cache, evicting the least recently used
        entry if the store is full, and moves the key to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
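    # A short continuation of the demo (added for illustration): referring an
    # existing key moves it to the front of the deque instead of evicting.
    lru_cache.refer(3)
    assert str(lru_cache) == "LRUCache(4) => [3, 5, 4, 'A']"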
| 424 | 1 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """dfs that appends each newly visited node to the Euler path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
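    # A tiny extra check (illustrative): a triangle has no odd-degree vertices,
    # so `check_circuit_or_path` classifies it as an Euler cycle (status 1).
    assert check_circuit_or_path({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10) == (1, -1)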
| 705 |
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sorts a list in place with insertion sort, using binary search to
    locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
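    # Self-checks added for illustration: duplicates are kept and the empty
    # list is returned unchanged.
    assert binary_insertion_sort([5, 2, 4, 2, 1]) == [1, 2, 2, 4, 5]
    assert binary_insertion_sort([]) == []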
| 48 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_main_input_name(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
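# For reference, plain inference with the checkpoint exercised above looks
# roughly like this (a sketch; producing `input_ids` from an amino-acid
# sequence is elided):
#
#     model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
#     model.eval()
#     with torch.no_grad():
#         outputs = model(input_ids)
#     positions = outputs["positions"]  # (8, batch, seq_len, 14, 3) atom coordinates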
| 604 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
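# A quick illustration of how this config is typically used (a sketch):
#
#     configuration = MgpstrConfig()              # mgp-str-base style defaults
#     configuration.max_token_length              # -> 27
#     configuration.save_pretrained("./mgp-str")  # writes config.json to disk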
| 224 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
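# A worked example of the SQuAD-style metrics above (illustrative):
#
#     >>> f1_score("The cat sat on the mat", "the cat is on a mat")
#     0.75
#     >>> exact_match_score("The cat!", "the cat")
#     True
#
# `normalize_answer` lowercases, strips punctuation and drops the articles
# a/an/the before the token-level comparison, which is why the two strings in
# the exact-match example compare as equal.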
| 705 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
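# Since this class only forwards to `Trainer`, new code should construct the
# base class directly (a sketch; `model` and `train_dataset` are placeholders):
#
#     from transformers import Trainer, TrainingArguments
#
#     trainer = Trainer(
#         model=model,
#         args=TrainingArguments(output_dir="./out"),
#         train_dataset=train_dataset,
#     )
#     trainer.train()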
| 623 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
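# After conversion, the dump is a plain state dict, so it can be restored like
# this (a sketch; paths are placeholders):
#
#     config = MobileBertConfig.from_json_file("mobilebert_config.json")
#     model = MobileBertForPreTraining(config)
#     model.load_state_dict(torch.load("pytorch_model.bin"))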
| 376 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep",
            mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
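# Concrete test classes are expected to combine this mixin with the common
# pipeline test machinery, roughly like so (a sketch; `IFPipeline` and
# `get_dummy_inputs` live in the concrete test module):
#
#     class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#         pipeline_class = IFPipeline
#
#         def get_dummy_components(self):
#             return self._get_dummy_components()
#
#         def test_save_load_local(self):
#             self._test_save_load_local()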
| 376 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
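# Because of `attribute_map`, the generic configuration names alias the
# CTRL-specific ones (illustrative):
#
#     config = CTRLConfig()
#     assert config.hidden_size == config.n_embd == 1280
#     assert config.num_hidden_layers == config.n_layer == 48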
| 702 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
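# A toy run of the deduplication helper above (illustrative):
#
#     docs = [
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#     ]
#     clean_model_doc_toc(docs)
#     # -> [{'local': 'model_doc/albert', 'title': 'ALBERT'},
#     #     {'local': 'model_doc/bert', 'title': 'BERT'}]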
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 124 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
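# Typical invocation through the transformers CLI (a sketch; the file paths
# are placeholders):
#
#     transformers-cli run --task text-classification \
#         --model distilbert-base-uncased-finetuned-sst-2-english \
#         --input data.csv --output predictions.csv --format csv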
| 385 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCAmelCase = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ) -> Dict:
    '''simple docstring'''
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ) -> Optional[Any]:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class lowerCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self):
        return len(self.encoder)
    def get_vocab( self):
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word
    def _tokenize( self , text):
        bpe_tokens = []
        for token in re.findall(self.pat , text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8'''))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens
    def _convert_token_to_id( self , token):
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self , index):
        return self.decoder.get(index)
    def convert_tokens_to_string( self , tokens):
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file , '''w''' , encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))
        return encoded_inputs
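# Quick sanity check of the byte-level BPE helpers above (illustrative; no
# checkpoint download needed):
#
#     table = bytes_to_unicode()
#     assert len(table) == 256 and table[ord("A")] == "A"  # printable bytes map to themselves
#     print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}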
| 462 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs = None , num_madeup_words=8 , **kwargs , ) ->None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] ) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ) ->int:
        '''simple docstring'''
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def src_lang( self ) ->str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) ->None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text ) ->List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) ->Tuple:
        '''simple docstring'''
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ) ->str:
        '''simple docstring'''
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) ->str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) ->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) ->List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ) ->Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) ->Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) ->None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) ->Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'''{save_directory} should be a directory''' )
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seqaseq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ) ->BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ) ->int:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ) ->int:
        '''simple docstring'''
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) ->str:
        '''simple docstring'''
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) ->None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ) ->None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ) ->str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ) ->int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path ,"r" ) as f:
        return json.load(f )
def save_json( data ,path ) -> None:
    '''simple docstring'''
    with open(path ,"w" ) as f:
        json.dump(data ,f ,indent=2 )
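# Hedged usage sketch of the language plumbing above (requires network access and
# the sentencepiece package; the checkpoint name comes from the PRETRAINED maps in
# this file, so treat this as illustrative rather than a test):
#
#     tok = SCREAMING_SNAKE_CASE.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     print(tok.get_lang_token("fr"))  # '__fr__'
#     print(tok.get_lang_id("fr"))     # vocabulary id of the '__fr__' sentinel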
| 712 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria( ABC ):
    '''simple docstring'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        '''simple docstring'''
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class MaxLengthCriteria( StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , max_length : int , max_position_embeddings : Optional[int] = None ) ->Union[str, Any]:
        '''simple docstring'''
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        '''simple docstring'''
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class MaxNewTokensCriteria( StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , start_length : int , max_new_tokens : int ) ->int:
        '''simple docstring'''
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        '''simple docstring'''
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria( StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , max_time : float , initial_timestamp : Optional[float] = None ) ->Dict:
        '''simple docstring'''
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        '''simple docstring'''
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList( list ):
    '''simple docstring'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        '''simple docstring'''
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def max_length( self ) ->Optional[int]:
        '''simple docstring'''
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria( stopping_criteria ,max_length ) -> StoppingCriteriaList:
    '''simple docstring'''
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" ,UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
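# Quick check of the criteria above with dummy tensors (torch is already imported
# at the top of this file; no model is needed):
#
#     input_ids = torch.ones((1, 12), dtype=torch.long)
#     scores = torch.zeros((1, 50))
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
#     print(criteria(input_ids, scores))  # True, since 12 >= 10
#     print(criteria.max_length)          # 10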
| 204 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict( filename )-> Union[str, Any]:
    """simple docstring"""
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively( hf_pointer , key , value , full_name , weight_type )-> Union[str, Any]:
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def rename_dict( key , value , full_name , weight_type , hf_dict )-> str:
    """simple docstring"""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None )-> Union[str, Any]:
    """simple docstring"""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split("." )[-2]
                mapped_key = mapped_key.replace("*" , layer_index )
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless )-> Tuple:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm )-> Union[str, Any]:
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False )-> List[str]:
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
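# Rough invocation sketch (the script filename and paths are placeholders; running
# this needs fairseq installed and a real checkpoint on disk):
#
#     python convert_wav2vec2.py --checkpoint_path ./wav2vec_small.pt \
#         --pytorch_dump_folder_path ./converted --not_finetuned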
| 491 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path )-> Any:
    """simple docstring"""
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , "" )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , "" )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
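# Rough invocation sketch (script name and paths are placeholders; requires
# omegaconf, torch and diffusers with the LDM classes imported above):
#
#     python convert_ldm.py --checkpoint_path model.ckpt --config_path config.yaml \
#         --output_path ./ldm_pipeline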
| 491 | 1 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def snake_case_ (*args , take_from = None , standard_warn=True , stacklevel=2 ) -> Any:
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
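# Hedged usage sketch, as it would run from inside the diffusers package
# (`snake_case_` is this file's mangled name for diffusers' `deprecate` helper):
#
#     kwargs = {"legacy_flag": 1}
#     value = snake_case_("legacy_flag", "99.0.0", "Use `new_flag` instead.", take_from=kwargs)
#     # -> emits a FutureWarning and returns 1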
| 710 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time (t : int ) -> str:
    t = int(t )
    h , m , s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar (value , total , prefix , label , width=3_0_0 ) -> int:
# docstyle-ignore
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table (items ) -> Tuple:
    html_code = """<table border=\"1\" class=\"dataframe\">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''      <th>{i}</th>\n'''
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
            html_code += f'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar :
    """simple docstring"""
    warmup : int =5
    update_every : float =0.2
    def __init__( self , total : int , prefix : Optional[str] = None , leave : bool = True , parent : Optional["NotebookTrainingTracker"] = None , width : int = 3_00 , ) -> int:
        """simple docstring"""
        self.total = total
        self.prefix = """""" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value : int , force_update : bool = False , comment : str = None ) -> Union[str, Any]:
        """simple docstring"""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ) -> Optional[int]:
        """simple docstring"""
        spaced_value = """ """ * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = f'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
        else:
            self.label = (
                f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
                f''' {format_time(self.predicted_remaining )}'''
            )
            self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
        self.display()
    def display( self ) -> List[str]:
        """simple docstring"""
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ) -> str:
        """simple docstring"""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML("""""" ) )
class NotebookTrainingTracker ( NotebookProgressBar ):
    """simple docstring"""
    def __init__( self , num_steps , column_names=None ) -> Any:
        """simple docstring"""
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ) -> List[Any]:
        """simple docstring"""
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ) -> Optional[Any]:
        """simple docstring"""
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=3_00 ) -> Tuple:
        """simple docstring"""
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ) -> int:
        """simple docstring"""
        self.child_bar = None
        self.display()
class NotebookProgressCallback ( TrainerCallback ):
    """simple docstring"""
    def __init__( self ) -> Tuple:
        """simple docstring"""
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ) -> str:
        """simple docstring"""
        self.first_column = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["""Training Loss"""]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("""Validation Loss""" )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ) -> List[Any]:
        """simple docstring"""
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ) -> List[str]:
        """simple docstring"""
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ) -> Optional[int]:
        """simple docstring"""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ) -> List[str]:
        """simple docstring"""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["""Step"""] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ) -> str:
        """simple docstring"""
        if self.training_tracker is not None:
            values = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values["""Training Loss"""] = log["""loss"""]
                    break
            if self.first_column == "Epoch":
                values["""Epoch"""] = int(state.epoch )
            else:
                values["""Step"""] = state.global_step
            metric_key_prefix = """eval"""
            for k in metrics:
                if k.endswith("""_loss""" ):
                    metric_key_prefix = re.sub(r"""\_loss$""" , """""" , k )
            _ = metrics.pop("""total_flos""" , None )
            _ = metrics.pop("""epoch""" , None )
            _ = metrics.pop(f'''{metric_key_prefix}_runtime''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , None )
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    values["""Validation Loss"""] = v
                else:
                    splits = k.split("""_""" )
                    name = """ """.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ) -> Tuple:
        """simple docstring"""
        self.training_tracker.update(
            state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=True )
        self.training_tracker = None
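# Minimal way to exercise the bar outside of a Trainer run (must execute in a
# Jupyter/IPython environment, since display() emits HTML output):
#
#     bar = NotebookProgressBar(100)
#     for step in range(100):
#         bar.update(step + 1)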
| 218 | 0 |
"""simple docstring"""
def compute_ap (l )-> Optional[int]: # noqa: E741
    '''simple docstring'''
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
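# Extra check on a path graph 0-1-2, where the middle vertex is the only
# articulation point, so this call should print just `1`:
compute_ap({0: [1], 1: [0, 2], 2: [1]})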
| 698 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
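# The lazy module above defers the heavy imports until first attribute access,
# e.g. (illustrative):
#
#     import transformers.models.m2m_100 as m2m  # nothing heavy imported yet
#     m2m.M2M100Config                           # triggers the real import here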
| 698 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137
def snake_case ( lat_1 : float, lon_1 : float, lat_2 : float, lon_2 : float ):
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat_1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat_2 ) ) )
    lambda_1 = radians(lon_1 )
    lambda_2 = radians(lon_2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
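# Numeric spot check (coordinates are illustrative; San Francisco to New York is
# roughly 4,100 km great-circle, so expect a value near 4.1e6 meters):
#
#     print(snake_case(37.774856, -122.424227, 40.713019, -74.012647))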
| 110 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_keys( s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers', 'layers' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('subsample', 'conv' )] = s_dict.pop(key )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms( checkpoint_path : str, pytorch_dump_folder_path : str ):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu' )
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes ), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=2_00, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict, strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
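# Illustrative invocation (paths are placeholders):
#   python convert_s2t_checkpoint.py --fairseq_path s2t.pt --pytorch_dump_folder_path ./s2t-hf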
| 110 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP preprocessing built from differentiable torchvision transforms."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
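# Unlike the stock CLIPProcessor (which round-trips through NumPy), the transforms
# above stay in torch, so the CLIP loss can backpropagate into the VQGAN latent.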
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames a little longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
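# Minimal usage sketch (file names are illustrative; assumes local VQGAN weights):
#   editor = VQGAN_CLIP(vqgan_config="vqgan.yaml", vqgan_checkpoint="vqgan.ckpt")
#   editor.generate(pos_prompts="a smiling face", image_path="face.png", show_final=True)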
| 402 |
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
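# Examples: "([]{})" is balanced; "([)]" is not, because ")" arrives while "[" is
# still the most recently opened bracket.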
| 402 | 1 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
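# Example from the problem statement: "SKY" scores 19 + 11 + 25 = 55 = t(10),
# so it counts as a triangular word.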
if __name__ == "__main__":
print(solution())
| 715 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 176 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]
    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
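# Illustrative invocation (paths are placeholders):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta.pth --vocab_transform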
| 235 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""microsoft/speecht5_asr""": 1_024,
"""microsoft/speecht5_tts""": 1_024,
"""microsoft/speecht5_vc""": 1_024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
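# Hedged usage sketch (requires the sentencepiece model from the hub):
#   tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   tok("Hello world").input_ids  # character-level ids ending with </s>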
| 235 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 695 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
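# Hedged usage sketch (`audio` is assumed to be a 16 kHz waveform array):
#   tool = SpeechToTextTool()
#   text = tool(audio)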
| 695 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
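# Example: longest_subsequence([1, 3, 2, 4]) returns [1, 2, 4], one of the
# longest non-decreasing subsequences of the input.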
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i with val."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the combined value over the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
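# Build is O(n); update and query_range are O(log n) for any associative `fn`.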
| 150 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # We pop out the `_config_dict` kwargs before calling `super().__init__` to avoid them being saved (which
        # would cause a lot of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
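# Hedged composition example:
#   config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())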
| 150 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 375 |
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
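# Sanity check: combinations(52, 5) == 2_598_960, the number of 5-card poker hands.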
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
| 375 | 1 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime up to and including num, via the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
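# Example: prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]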
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 0 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1_024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
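# Typical invocation (a sketch; the checkpoint path and output folder are illustrative):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100-converted
# Because make_linear_from_emb assigns the embedding tensor by reference, the
# converted LM head shares storage with the shared token embedding:
#   model = convert_fairseq_m2m100_checkpoint_from_disk("/path/to/model.pt")
#   assert model.lm_head.weight.data_ptr() == model.model.shared.weight.data_ptr()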
| 0 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test using prec random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 675 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page replacement cache: a deque keeps recency order, a set tracks membership."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            # Evict the least recently used key when the store is full.
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
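# Design note: dq_store.remove(x) is O(n) on a deque, so refer() degrades with
# cache size. A sketch of the same policy with O(1) updates via OrderedDict
# (illustrative; not part of the original implementation):
from collections import OrderedDict


class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, x) -> None:
        if x in self.store:
            self.store.move_to_end(x, last=False)  # re-mark as most recent
            return
        if len(self.store) == self.capacity:
            self.store.popitem(last=True)  # evict the least recently used key
        self.store[x] = None
        self.store.move_to_end(x, last=False)  # front of the dict = most recent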
| 424 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
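# The tester pattern above in miniature (a sketch; the config values mirror the
# tester's defaults, and running it needs torch + transformers installed):
#   config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4)
#   model = FlaubertModel(config).eval()
#   input_ids = torch.randint(0, 99, (13, 7))
#   assert model(input_ids).last_hidden_state.shape == (13, 7, 32)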
| 423 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
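# How the lazy table behaves at runtime (illustrative; the class names are the
# ones declared above): the module object is swapped for a _LazyModule, so the
# torch-backed classes are only imported when an attribute is first accessed.
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig  # cheap, config only
#   from transformers.models.gpt_bigcode import GPTBigCodeModel   # first access pulls in the torch-backed code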
| 423 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
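# Expected behavior of the deprecation shim (a sketch; requires transformers
# with the DeiT image processor available):
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       DeiTFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)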
| 19 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a [`DetrConfig`] from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
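# Usage sketch (the values shown are the defaults set above; running it needs
# transformers installed):
#   config = DetrConfig()
#   assert config.hidden_size == config.d_model == 256   # via attribute_map / property
#   onnx_config = DetrOnnxConfig(config)
#   assert list(onnx_config.inputs) == ["pixel_values", "pixel_mask"]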
| 19 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        """[...]"""
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught

        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        """[...]"""
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
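# Registering a patcher for another metric follows the same shape (a hypothetical
# example; the metric name and patched path are illustrative, not from this file):
#   @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#   def patch_my_metric(module_name):
#       with patch("my_metric_backend.Scorer.score") as mock_score:
#           mock_score.return_value = {}
#           yield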
| 208 | 0 |