'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "beit"
    def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
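# Minimal usage sketch for the config above; the override values are illustrative,
# everything else keeps the BEiT-base defaults from __init__:
#   config = BeitConfig(image_size=384, use_auxiliary_head=False)
#   config.hidden_size   # 768 (default)
#   config.image_size    # 384 (override)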
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key( k , patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus( tf_weights: dict , config_update: dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
# separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def get_tf_weights_as_numpy( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path: str , save_dir: str , config_update: dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
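# Invocation sketch; the script filename and checkpoint paths below are hypothetical
# placeholders, and config_update stays empty unless the checkpoint deviates from the
# default BigBirdPegasusConfig:
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird_pegasus_ckpt --save_dir ./bigbird-pegasus-pt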
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
            webbrowser.open(F'https://google.com{link.get("href")}')
'''simple docstring'''
def longest_distance( graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
for values in graph.values():
for i in values:
indegree[i] += 1
    for i in range(len(indegree ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
import math
def solution( n: int = 100 ):
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    print(F'{solution() = }')
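# Cross-check sketch: the same difference in closed form, using the standard identities
# sum(1..n) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6 (the helper name is illustrative,
# not part of the original file).
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == solution(10) == 2640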
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest( unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "gptj"
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=50400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , tie_word_embeddings=False , **kwargs , ):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ):
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
# We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 13
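# Usage sketch, assuming the classes above are importable and the public GPT-J
# checkpoint (named in the archive map above) is reachable:
#   config = GPTJConfig.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(config)
#   list(onnx_config.inputs)        # ['input_ids', 'attention_mask']
#   onnx_config.default_onnx_opset  # 13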
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
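# Worked example for the derived attribute above: with the defaults (embed_dim=96 and
# four stages from depths=[2, 2, 6, 2]), hidden_size = 96 * 2 ** 3 = 768.
#   config = Swinv2Config()
#   config.hidden_size   # 768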
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
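# Sketch of what the lazy wiring above buys (the import path assumes the usual
# transformers package layout): importing the package is cheap, and each submodule is
# loaded on first attribute access, so torch-only classes are untouched until referenced.
#   from transformers.models.gpt_neo import GPTNeoConfig  # loads configuration_gpt_neo only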
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester( unittest.TestCase ):
    '''simple docstring'''
    def test_find_backend( self ):
        '''simple docstring'''
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
        self.assertIsNone(no_backend )
        simple_backend = find_backend(' if not is_tokenizers_available():' )
        self.assertEqual(simple_backend , 'tokenizers' )
        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():' )
        self.assertEqual(backend_with_underscore , 'tensorflow_text' )
        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
        self.assertEqual(double_backend , 'sentencepiece_and_tokenizers' )
        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
        self.assertEqual(double_backend_with_underscore , 'sentencepiece_and_tensorflow_text' )
        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
        self.assertEqual(triple_backend , 'sentencepiece_and_tokenizers_and_vision' )
    def test_read_init( self ):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('tensorflow_text' , objects )
        self.assertIn('sentencepiece_and_tokenizers' , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
    def test_create_dummy_object( self ):
        '''simple docstring'''
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )
        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n' )
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files( self ):
        '''simple docstring'''
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins( root: TreeNode | None ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
    doctest.testmod()
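# Minimal usage sketch with illustrative values: a root holding 3 coins whose two empty
# children each need one coin takes exactly two moves.
example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_tree) == 2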
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    '''simple docstring'''
    def __init__( self , question_encoder , generator ):
        '''simple docstring'''
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        '''simple docstring'''
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , 'question_encoder_tokenizer' )
        generator_path = os.path.join(save_directory , 'generator_tokenizer' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('config' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='generator_tokenizer' )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts: List[str] , tgt_texts: Optional[List[str]] = None , max_length: Optional[int] = None , max_target_length: Optional[int] = None , padding: str = "longest" , return_tensors: str = None , truncation: bool = True , **kwargs , ) -> BatchEncoding:
'''simple docstring'''
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
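# Usage sketch, assuming network access to the public RAG checkpoint; generated_ids
# would come from a RAG model's generate() call:
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)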
'''simple docstring'''
from __future__ import annotations
def all_unique( nums: list[int] ) -> bool:
    '''simple docstring'''
    return len(set(nums ) ) == len(nums )
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs( mockfs ):
    '''simple docstring'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs( ):
    '''simple docstring'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri( ):
    '''simple docstring'''
    mock_bucket = 'mock-s3-bucket'
    dataset_path = f"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('s3://' ) is False
    dataset_path = './local/path'
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem( mockfs ):
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem('file' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , COMPRESSION_FILESYSTEMS )
def test_compression_filesystems( compression_fs_class , gz_file , bz2_file , lz4_file , zstd_file , xz_file , text_file ):
    '''simple docstring'''
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bz2_file, 'lz4': lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('.' )]
    assert fs.glob('*' ) == [expected_filename]
    with fs.open(expected_filename , 'r' , encoding='utf-8' ) as f, open(text_file , encoding='utf-8' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def test_fs_isfile( protocol , zip_jsonl_path , jsonl_gz_path ):
    '''simple docstring'''
    compressed_file_paths = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = 'dataset.jsonl'
    path = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def test_hf_filesystem( hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('data' )
    assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
    with open(text_file ) as f:
        assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def test_fs_overwrites( ):
    '''simple docstring'''
    protocol = 'bz2'
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
from functools import lru_cache
def unique_prime_factors( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )
def equality( iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)
def run( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
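# Quick illustration: 14 = 2 * 7 and 15 = 3 * 5 form the first pair of consecutive
# integers with two distinct prime factors each, so solution(2) returns 14.
assert unique_prime_factors(14) == {2, 7}
assert solution(2) == 14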
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class SwinOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests( unittest.TestCase ):
    def get_file_format( self , seed , shape ):
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents( self , seed=0 , shape=(4, 4, 64, 64) , fp16=False ):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self , fp16=False , model_id="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder="unet" , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self , seed=0 , shape=(4, 77, 768) , fp16=False ):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fp16=True )
        latents = self.get_latents(seed , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fp16=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1024) , fp16=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
from __future__ import annotations
def longest_subsequence( array: list[int] ) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
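# Usage sketch traced by hand: for [1, 9, 2, 3, 4] the pivot path keeps [1, 2, 3, 4],
# the longest non-decreasing subsequence of the input.
assert longest_subsequence([1, 9, 2, 3, 4]) == [1, 2, 3, 4]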
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def ugly_numbers( n ):
    '''simple docstring'''
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(200) = }''')
| 693 |
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: the output is 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy the weights of a fairseq X-MOD checkpoint into a HuggingFace Xmod model."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
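    # Note: the assignments here and below bind tensors by reference rather than
    # copying, so the HF modules end up holding the original fairseq parameters.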
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
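
# A minimal usage sketch of `Dataset.from_list` outside the test harness
# (assumes the `datasets` library is installed):
#
#   dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#   assert dset.column_names == ["col_1", "col_2"]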
| 693 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
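
# Arrow-key codes are stored shifted by ARROW_KEY_FLAG (1 << 8) so that they
# can never collide with ordinary single-byte character codes.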
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from the keyboard without echoing them."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Read one key press, translating arrow-key escape sequences into flagged codes."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 693 | 1 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Either extend the best run ending at i-1 or restart at element i,
            # then track the best sum seen anywhere so far.
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(('the results is:', re))
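
# Example: SubArray("1,-2,3,4").solve_sub_array() == 7 (the run "3,4").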
| 693 |
# Imports
import numpy as np
class IndexCalculation:
    """Calculate vegetation indices from spectral band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(
        self, index="", red=None, green=None, blue=None, red_edge=None, nir=None
    ):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
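
# A minimal usage sketch (hypothetical reflectance values; any same-shaped
# numpy arrays work):
#
#   red = np.array([[50.0, 60.0], [55.0, 65.0]])
#   nir = np.array([[200.0, 210.0], [205.0, 215.0]])
#   cl = IndexCalculation(red=red, nir=nir)
#   print(cl.ndvi())  # elementwise (nir - red) / (nir + red)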
| 693 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
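
# These regression tests step a plain model and its `accelerator.prepare`-wrapped
# copy side by side, checking that `no_sync` and `accumulate` only synchronize
# gradients on the expected iterations.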
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Check whether the gradients of two models are (or are not) in sync."""
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    """Perform one forward/backward step, via `accelerator.backward` by default."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Mimic the scaling `accelerator.backward` applies during accumulation.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed to run a basic training loop."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
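    # With gradient_accumulation_steps=2, gradients should only synchronize on
    # every second batch (and on the final batch of the dataloader).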
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
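
# Dispatch each scenario according to the current DistributedType, so the same
# script covers single-process, multi-CPU, and multi-GPU launches.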
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ ,F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,)
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" ,"""2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,"""`split_batches=False`, `dispatch_batches=False`**""" ,)
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,)
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 693 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first request already returned 100 jobs; page through the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
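
# A minimal usage sketch (hypothetical run id; a token with `actions:read`
# permission is only required for private repositories):
#
#   links = get_job_links(12345)
#   print(f"{len(links)} jobs found")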
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact and save it as `{artifact_name}.zip`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error, grouped by model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
            index = k.find(' / ')
            k = k[index + len(' / ') :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
| 693 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Collect the learning rate over `num_steps` scheduler steps."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Same as `unwrap_schedule`, but save and reload the scheduler halfway through."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wrap `lr_lambda` functions in picklable callable objects."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 693 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if "small" in model_name:
lowerCAmelCase : List[str] = 3_8_4
lowerCAmelCase : int = 1_5_3_6
lowerCAmelCase : Tuple = 1_2
lowerCAmelCase : Tuple = 1_6
lowerCAmelCase : List[str] = 1_2
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Any = 1_9_2
lowerCAmelCase : str = 7_6_8
elif "large" in model_name:
lowerCAmelCase : int = 1_0_2_4
lowerCAmelCase : Tuple = 4_0_9_6
lowerCAmelCase : List[str] = 2_4
lowerCAmelCase : Optional[Any] = 1_6
lowerCAmelCase : Union[str, Any] = 1_2
lowerCAmelCase : Any = 8
lowerCAmelCase : Optional[Any] = 5_1_2
lowerCAmelCase : Optional[Any] = 2_0_4_8
elif "huge" in model_name:
lowerCAmelCase : Dict = 1_2_8_0
lowerCAmelCase : Optional[int] = 5_1_2_0
lowerCAmelCase : Dict = 3_2
lowerCAmelCase : str = 1_6
lowerCAmelCase : Optional[int] = 1_2
lowerCAmelCase : str = 8
lowerCAmelCase : str = 6_4_0
lowerCAmelCase : List[Any] = 2_5_6_0
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if "encoder." in name:
lowerCAmelCase : int = name.replace("""encoder.""" ,"""""" )
if "cls_token" in name:
lowerCAmelCase : Tuple = name.replace("""cls_token""" ,"""videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
lowerCAmelCase : str = name.replace("""decoder_pos_embed""" ,"""decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowerCAmelCase : Dict = name.replace("""pos_embed""" ,"""videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCAmelCase : Optional[Any] = name.replace("""patch_embed.proj""" ,"""videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase : Any = name.replace("""patch_embed.norm""" ,"""videomae.embeddings.norm""" )
if "decoder.blocks" in name:
lowerCAmelCase : Any = name.replace("""decoder.blocks""" ,"""decoder.decoder_layers""" )
if "blocks" in name:
lowerCAmelCase : Any = name.replace("""blocks""" ,"""videomae.encoder.layer""" )
if "attn.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name and "bias" not in name:
lowerCAmelCase : Any = name.replace("""attn""" ,"""attention.self""" )
if "attn" in name:
lowerCAmelCase : Optional[int] = name.replace("""attn""" ,"""attention.attention""" )
if "norm1" in name:
lowerCAmelCase : List[str] = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
lowerCAmelCase : List[Any] = name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase : int = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase : Union[str, Any] = name.replace("""mlp.fc2""" ,"""output.dense""" )
if "decoder_embed" in name:
lowerCAmelCase : List[Any] = name.replace("""decoder_embed""" ,"""decoder.decoder_embed""" )
if "decoder_norm" in name:
lowerCAmelCase : str = name.replace("""decoder_norm""" ,"""decoder.decoder_norm""" )
if "decoder_pred" in name:
lowerCAmelCase : List[str] = name.replace("""decoder_pred""" ,"""decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCAmelCase : str = name.replace("""norm.weight""" ,"""videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCAmelCase : Dict = name.replace("""norm.bias""" ,"""videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
lowerCAmelCase : int = name.replace("""head""" ,"""classifier""" )
return name
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase : str = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if key.startswith("""encoder.""" ):
lowerCAmelCase : Tuple = key.replace("""encoder.""" ,"""""" )
if "qkv" in key:
lowerCAmelCase : Tuple = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
lowerCAmelCase : Any = config.decoder_hidden_size
lowerCAmelCase : Union[str, Any] = int(key_split[2] )
lowerCAmelCase : Optional[int] = """decoder.decoder_layers."""
if "weight" in key:
lowerCAmelCase : List[Any] = val[:dim, :]
lowerCAmelCase : str = val[dim : dim * 2, :]
lowerCAmelCase : List[str] = val[-dim:, :]
else:
lowerCAmelCase : int = config.hidden_size
lowerCAmelCase : Optional[int] = int(key_split[1] )
lowerCAmelCase : List[Any] = """videomae.encoder.layer."""
if "weight" in key:
lowerCAmelCase : Union[str, Any] = val[:dim, :]
lowerCAmelCase : List[Any] = val[dim : dim * 2, :]
lowerCAmelCase : List[Any] = val[-dim:, :]
else:
lowerCAmelCase : str = val
return orig_state_dict
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" ,filename="""eating_spaghetti.npy""" ,repo_type="""dataset""" )
lowerCAmelCase : Optional[int] = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = get_videomae_config(SCREAMING_SNAKE_CASE__ )
if "finetuned" in model_name:
lowerCAmelCase : List[str] = VideoMAEForVideoClassification(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Any = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
# download original checkpoint, hosted on Google Drive
lowerCAmelCase : Tuple = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,quiet=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = torch.load(SCREAMING_SNAKE_CASE__ ,map_location="""cpu""" )
if "model" in files:
lowerCAmelCase : List[str] = files["""model"""]
else:
lowerCAmelCase : str = files["""module"""]
lowerCAmelCase : Optional[Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify model on basic input
lowerCAmelCase : Optional[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
lowerCAmelCase : Optional[Any] = prepare_video()
lowerCAmelCase : Tuple = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors="""pt""" )
if "finetuned" not in model_name:
lowerCAmelCase : Optional[Any] = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" ,filename="""bool_masked_pos.pt""" )
lowerCAmelCase : int = torch.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = outputs.logits
lowerCAmelCase : Tuple = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCAmelCase : Tuple = torch.Size([1, 4_0_0] )
lowerCAmelCase : Union[str, Any] = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCAmelCase : Optional[Any] = torch.Size([1, 1_7_4] )
lowerCAmelCase : str = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
lowerCAmelCase : Union[str, Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : List[Any] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
lowerCAmelCase : Optional[int] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : int = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCAmelCase : Dict = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
lowerCAmelCase : Union[str, Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : Tuple = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCAmelCase : List[Any] = torch.Size([1, 4_0_0] )
lowerCAmelCase : Optional[int] = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCAmelCase : Optional[int] = torch.Size([1, 4_0_0] )
lowerCAmelCase : Any = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCAmelCase : List[str] = torch.Size([1, 4_0_0] )
lowerCAmelCase : List[Any] = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCAmelCase : Dict = torch.Size([1, 4_0_0] )
lowerCAmelCase : List[Any] = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
lowerCAmelCase : Optional[Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : Dict = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCAmelCase : Dict = torch.Size([1, 1_7_4] )
lowerCAmelCase : Any = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
lowerCAmelCase : List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : Dict = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCAmelCase : Optional[Any] = torch.Size([1, 1_7_4] )
lowerCAmelCase : Optional[Any] = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 )
else:
print("""Logits:""" ,logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCAmelCase : Optional[int] = outputs.loss
assert torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE__ ,organization="""nielsr""" )
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase : int =parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
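# Example invocation (illustrative; the URL and paths below are placeholders, not
# verified links):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<direct Google Drive download link>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base \
#       --push_to_hub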
| 693 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
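# Usage sketch for the config above (upstream name DetrConfig assumed; the class
# identifier here is obfuscated):
#
#   from transformers import DetrConfig
#   cfg = DetrConfig(num_queries=50)
#   assert cfg.hidden_size == cfg.d_model  # resolved through attribute_map
#   as_dict = cfg.to_dict()                # nested backbone_config is serialized when present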
| 693 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : Any ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase : int ={
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
lowerCAmelCase : Tuple ={
'gpt2': 1_024,
'gpt2-medium': 1_024,
'gpt2-large': 1_024,
'gpt2-xl': 1_024,
'distilgpt2': 1_024,
}
class _a ( snake_case_ ):
_UpperCamelCase: Any = VOCAB_FILES_NAMES
_UpperCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: List[Any] = ["input_ids", "attention_mask"]
_UpperCamelCase: str = GPTaTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_=False , **lowercase_ , ) -> Any:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
lowerCAmelCase : Dict = kwargs.pop("""add_bos_token""" , lowercase_ )
lowerCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase_ ) != add_prefix_space:
lowerCAmelCase : Optional[int] = getattr(lowercase_ , pre_tok_state.pop("""type""" ) )
lowerCAmelCase : Dict = add_prefix_space
lowerCAmelCase : Optional[Any] = pre_tok_class(**lowercase_ )
lowerCAmelCase : List[Any] = add_prefix_space
def _snake_case ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding:
lowerCAmelCase : str = kwargs.get("""is_split_into_words""" , lowercase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase_ , **lowercase_ )
def _snake_case ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding:
lowerCAmelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , lowercase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
lowerCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def _snake_case ( self , lowercase_ ) -> List[int]:
lowerCAmelCase : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
lowerCAmelCase : Any = input_ids[-self.model_max_length :]
return input_ids
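# Usage sketch (upstream name GPT2TokenizerFast assumed for the class above; uses
# the downloaded/cached "gpt2" vocab):
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   enc = tok(["hello", "world"], is_split_into_words=True)  # allowed because add_prefix_space=True
#   print(tok.decode(enc["input_ids"]))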
| 693 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=2 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
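# Running these tests (illustrative; the example path is assumed from the upstream
# repo layout). They shell out to finetune_rag.py via execute_subprocess_async, so
# the GPU-decorated cases only execute on matching hardware:
#
#   RUN_SLOW=1 pytest examples/research_projects/rag/_test_finetune_rag.py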
| 693 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCAmelCase : str =[
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Any = True
while ask_again:
lowerCAmelCase : Any = input(SCREAMING_SNAKE_CASE__ )
try:
if default is not None and len(SCREAMING_SNAKE_CASE__ ) == 0:
return default
return convert_value(SCREAMING_SNAKE_CASE__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=[] ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
lowerCAmelCase : str = BulletMenu(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = menu.run(default_choice=SCREAMING_SNAKE_CASE__ )
return convert_value(SCREAMING_SNAKE_CASE__ ) if convert_value is not None else result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = int(SCREAMING_SNAKE_CASE__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = int(SCREAMING_SNAKE_CASE__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = int(SCREAMING_SNAKE_CASE__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter ):
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
lowerCAmelCase : str = super()._format_usage(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : int = usage.replace("""<command> [<args>] """ , """""" )
return usage
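# Illustrative sketch: _ask_field re-prompts until convert_value succeeds, so a
# yes/no question can be phrased like this (upstream helper names assumed; the
# functions above are obfuscated to a shared identifier):
#
#   use_cpu = _ask_field(
#       "Do you want to run on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )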
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = SamImageProcessor()
lowerCAmelCase : Union[str, Any] = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : int = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowerCAmelCase : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : Any = image_processor(lowercase_ , return_tensors="""np""" )
lowerCAmelCase : Optional[int] = processor(images=lowercase_ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : List[str] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : str = [torch.ones((1, 3, 5, 5) )]
lowerCAmelCase : str = [[1764, 2646]]
lowerCAmelCase : Optional[int] = [[683, 1024]]
lowerCAmelCase : Optional[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Optional[int] = processor.post_process_masks(
lowercase_ , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase : Tuple = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase : Dict = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(lowercase_ ):
lowerCAmelCase : Dict = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) )
@require_vision
@require_tf
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> Any:
lowerCAmelCase : Dict = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = SamImageProcessor()
lowerCAmelCase : List[str] = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Any:
lowerCAmelCase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> int:
lowerCAmelCase : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowerCAmelCase : List[str] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Tuple = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : str = self.prepare_image_inputs()
lowerCAmelCase : List[Any] = image_processor(lowercase_ , return_tensors="""np""" )
lowerCAmelCase : Optional[Any] = processor(images=lowercase_ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : List[str] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Optional[int] = [tf.ones((1, 3, 5, 5) )]
lowerCAmelCase : Optional[Any] = [[1764, 2646]]
lowerCAmelCase : List[str] = [[683, 1024]]
lowerCAmelCase : List[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Any = processor.post_process_masks(
lowercase_ , tf.convert_to_tensor(lowercase_ ) , tf.convert_to_tensor(lowercase_ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase : List[Any] = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase : List[Any] = processor.post_process_masks(
lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : str = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowerCAmelCase : Union[str, Any] = processor.post_process_masks(
lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase : Dict = tempfile.mkdtemp()
lowerCAmelCase : List[Any] = SamImageProcessor()
lowerCAmelCase : int = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowerCAmelCase : str = [tf.convert_to_tensor(lowercase_ )]
lowerCAmelCase : str = [torch.tensor(lowercase_ )]
lowerCAmelCase : Union[str, Any] = [[1764, 2646]]
lowerCAmelCase : int = [[683, 1024]]
lowerCAmelCase : List[str] = processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , return_tensors="""tf""" )
lowerCAmelCase : Optional[int] = processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Dict = self.get_image_processor()
lowerCAmelCase : str = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Any = self.prepare_image_inputs()
lowerCAmelCase : Dict = image_processor(lowercase_ , return_tensors="""pt""" )["""pixel_values"""].numpy()
lowerCAmelCase : Tuple = processor(images=lowercase_ , return_tensors="""pt""" )["""pixel_values"""].numpy()
lowerCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors="""tf""" )["""pixel_values"""].numpy()
lowerCAmelCase : Any = processor(images=lowercase_ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
| 693 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
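# Usage sketch (illustrative). The obfuscated __call__ body above refers to its
# pre-obfuscation local names, but the upstream pipeline takes a UNet/scheduler
# pair at construction and returns ones_like(sample) when called:
#
#   from diffusers import DDPMScheduler, UNet2DModel
#   unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
#   pipe = CustomLocalPipeline(unet=unet, scheduler=DDPMScheduler())  # upstream class name assumed
#   ones = pipe()  # tensor of ones, shape (1, 3, 8, 8)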
| 693 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase : Optional[List[str]] =None
lowerCAmelCase : List[str] ='<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase : str =[
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class _a :
_UpperCamelCase: bool = True
_UpperCamelCase: Optional[str] = None
# Automatically constructed
_UpperCamelCase: ClassVar[str] = "PIL.Image.Image"
_UpperCamelCase: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_UpperCamelCase: str = field(default="Image" , init=snake_case_ , repr=snake_case_ )
def __call__( self ) -> Optional[int]:
return self.pa_type
def _snake_case ( self , lowercase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : str = np.array(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase_ , lowercase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase_ )
elif isinstance(lowercase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _snake_case ( self , lowercase_ , lowercase_=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
lowerCAmelCase : str = {}
lowerCAmelCase , lowerCAmelCase : Optional[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(lowercase_ ):
lowerCAmelCase : int = PIL.Image.open(lowercase_ )
else:
lowerCAmelCase : Optional[Any] = path.split("""::""" )[-1]
try:
lowerCAmelCase : Dict = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCAmelCase : Optional[int] = token_per_repo_id.get(lowercase_ )
except ValueError:
lowerCAmelCase : Optional[int] = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowerCAmelCase : int = BytesIO(f.read() )
lowerCAmelCase : Any = PIL.Image.open(bytes_ )
else:
lowerCAmelCase : List[str] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def _snake_case ( self , lowercase_ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowerCAmelCase : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowerCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase : List[Any] = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCAmelCase : int = storage.field("""bytes""" )
else:
lowerCAmelCase : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCAmelCase : Optional[int] = storage.field("""path""" )
else:
lowerCAmelCase : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCAmelCase : Optional[int] = pa.array(
[encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCAmelCase : Dict = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : Any = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def _snake_case ( self , lowercase_ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase_ ):
with xopen(lowercase_ , """rb""" ) as f:
lowerCAmelCase : int = f.read()
return bytes_
lowerCAmelCase : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase : Optional[int] = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def _UpperCAmelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCAmelCase : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
lowerCAmelCase : str = image.format
else:
lowerCAmelCase : Optional[int] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(SCREAMING_SNAKE_CASE__ ,format=SCREAMING_SNAKE_CASE__ )
return buffer.getvalue()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if hasattr(SCREAMING_SNAKE_CASE__ ,"""filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE__ )}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
lowerCAmelCase : Optional[int] = array.dtype
lowerCAmelCase : str = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowerCAmelCase : Union[str, Any] = dtype.kind
lowerCAmelCase : int = dtype.itemsize
lowerCAmelCase : Dict = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCAmelCase : Union[str, Any] = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCAmelCase : Any = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCAmelCase : List[str] = dtype_byteorder + dtype_kind + str(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = np.dtype(SCREAMING_SNAKE_CASE__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
lowerCAmelCase : Dict = PIL.Image.fromarray(array.astype(SCREAMING_SNAKE_CASE__ ) )
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE__ )}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
lowerCAmelCase , lowerCAmelCase : str = first_non_null_value(SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray ):
lowerCAmelCase : Any = no_op_if_value_is_null(SCREAMING_SNAKE_CASE__ )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE__ ) for obj in objs]
elif isinstance(SCREAMING_SNAKE_CASE__ ,PIL.Image.Image ):
lowerCAmelCase : Dict = no_op_if_value_is_null(SCREAMING_SNAKE_CASE__ )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE__ ) for obj in objs]
else:
return objs
else:
return objs
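# Round-trip sketch for the feature above (upstream name datasets.Image assumed):
#
#   import numpy as np
#   from datasets import Image
#   feat = Image()
#   enc = feat.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))  # -> {"path": None, "bytes": b"..."}
#   img = feat.decode_example(enc)                                  # PIL.Image.Image, mode "RGB"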
| 693 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
| 693 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
lowerCAmelCase : Dict = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = 0
lowerCAmelCase : Dict = 2
while digits < n:
index += 1
lowerCAmelCase : Union[str, Any] = len(str(fibonacci(SCREAMING_SNAKE_CASE__ ) ) )
return index
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_0_0_0 ):
'''simple docstring'''
return fibonacci_digits_index(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(solution(int(input().strip())))
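# Quick checks (illustrative). The three helpers above collapsed to one obfuscated
# name, so their upstream names are assumed here:
#
#   fibonacci(12)              # -> 144, the first Fibonacci number with three digits
#   fibonacci_digits_index(3)  # -> 12
#   solution(1000)             # -> 4782 (Project Euler 25)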
| 693 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
        lowerCAmelCase : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
        lowerCAmelCase : List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
        lowerCAmelCase : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
        lowerCAmelCase : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
        lowerCAmelCase : Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
        lowerCAmelCase : Union[str, Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 | 1 |
# Imports
import numpy as np
class _a :
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> List[Any]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Union[str, Any]:
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Optional[int] = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Union[str, Any] = nir
return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Tuple = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
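# Minimal standalone sketch (not part of the original snippet): NDVI computed
# directly with numpy on two hypothetical reflectance bands, mirroring the
# (nir - red) / (nir + red) formula implemented by the ndvi entry above.
if __name__ == "__main__":
    demo_red = np.array([0.2, 0.3, 0.4])  # hypothetical red band
    demo_nir = np.array([0.6, 0.7, 0.8])  # hypothetical near-infrared band
    print((demo_nir - demo_red) / (demo_nir + demo_red))  # [0.5, 0.4, 0.333...]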
| 693 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b by shift-and-add (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c without ever forming the full product."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
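# Minimal usage sketch (not part of the original snippet); the names
# binary_multiply and binary_mod_multiply were chosen while repairing the
# two functions above.
if __name__ == "__main__":
    print(binary_multiply(7, 8))  # 56, via shift-and-add
    print(binary_mod_multiply(7, 8, 5))  # 1, since (7 * 8) % 5 == 1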
| 693 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCAmelCase : str =logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case_ , )
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold
    def set_patience(self, patience):
        self.patience = patience
    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
@add_start_docstrings_to_model_forward(lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False , ) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : Optional[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
lowerCAmelCase : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : str = torch.ones(lowercase_ , device=lowercase_ )
if token_type_ids is None:
lowerCAmelCase : Dict = torch.zeros(lowercase_ , dtype=torch.long , device=lowercase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(lowercase_ , lowercase_ , lowercase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : str = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : str = torch.ones(lowercase_ , device=lowercase_ )
lowerCAmelCase : Any = self.invert_attention_mask(lowercase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Tuple = self.get_head_mask(lowercase_ , self.config.num_hidden_layers )
lowerCAmelCase : Optional[int] = self.embeddings(
input_ids=lowercase_ , position_ids=lowercase_ , token_type_ids=lowercase_ , inputs_embeds=lowercase_ )
lowerCAmelCase : str = embedding_output
if self.training:
lowerCAmelCase : Any = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
lowercase_ , current_layer=lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ )
lowerCAmelCase : Any = self.pooler(lowercase_ )
lowerCAmelCase : Optional[Any] = output_layers[i](output_dropout(lowercase_ ) )
res.append(lowercase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Optional[Any] = self.encoder(
lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
lowerCAmelCase : List[str] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : int = [output_layers[self.config.num_hidden_layers - 1](lowercase_ )]
else:
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Any = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : List[Any] = self.encoder.adaptive_forward(
lowercase_ , current_layer=lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ )
lowerCAmelCase : List[Any] = self.pooler(lowercase_ )
lowerCAmelCase : Optional[int] = output_layers[i](lowercase_ )
if regression:
lowerCAmelCase : str = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[str] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Optional[int] = 0
else:
lowerCAmelCase : List[Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowercase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : int = 0
lowerCAmelCase : List[str] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Tuple = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , snake_case_ , )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()
@add_start_docstrings_to_model_forward(lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> int:
lowerCAmelCase : int = self.bert(
input_ids=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Union[str, Any] = 0
for ix, logits_item in enumerate(lowercase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : str = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Union[str, Any] = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
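# Standalone sketch (not part of the original model code): the patience rule
# used in the forward pass above, in isolation. Inference stops as soon as
# `patience` consecutive internal classifiers agree on the same prediction.
def pabee_exit_layer(layer_predictions, patience):
    streak = 0
    previous = None
    for layer, prediction in enumerate(layer_predictions, start=1):
        streak = streak + 1 if prediction == previous else 0
        previous = prediction
        if streak == patience:
            return layer  # early exit at this layer
    return len(layer_predictions)  # no early exit: every layer runs
if __name__ == "__main__":
    print(pabee_exit_layer([3, 1, 2, 2, 2, 2], patience=3))  # 6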
| 693 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self) -> str:
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError
    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4
print(differentiate(f, 9, 2))
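# Sanity check (not part of the original snippet): f(y) = y**2 * y**4 = y**6,
# so f''(y) = 30 * y**4 and f''(9) = 30 * 6561 = 196830, matching the print above.
assert differentiate(f, 9, 2) == 30 * 9**4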
| 693 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
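    # Follow-up sketch (not part of the original snippet): f(x) = x**3 - 2x - 5
    # has its only real root near 2.0945515, so the value printed above should
    # agree with that to roughly the 10**-5 stopping tolerance.
    print(round(intersection(f, 3, 3.5), 4))  # approximately 2.0946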
| 693 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
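# Note (not part of the original snippet): dummy placeholders like the class
# above defer the failure for a missing optional backend -- importing the
# library still works, and only constructing the object calls
# requires_backends, which raises an informative error asking the user to
# install `keras_nlp`.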
| 693 | 1 |
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
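    # Minimal usage sketch (not part of the original snippet); the name
    # sum_of_proper_divisors was chosen while repairing the function above.
    # A number equal to the sum of its proper divisors is perfect:
    # 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14.
    print(sum_of_proper_divisors(6))  # 6
    print(sum_of_proper_divisors(28))  # 28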
| 693 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 693 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
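    # Follow-up sketch (not part of the original snippet): the exponents 3, 5
    # and 13 also yield Mersenne primes (7, 31 and 8191), so every check below
    # prints True.
    for p in (3, 5, 13):
        print(p, lucas_lehmer_test(p))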
| 693 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None
    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None
    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)
    def delete_head(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None
    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 693 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Any =logging.get_logger(__name__)
class _a ( snake_case_ ):
_UpperCamelCase: Any = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_ )
lowerCAmelCase : int = size if size is not None else {"""shortest_edge""": 384}
lowerCAmelCase : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : Optional[Any] = do_resize
lowerCAmelCase : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase : List[Any] = crop_pct if crop_pct is not None else 224 / 256
lowerCAmelCase : Any = resample
lowerCAmelCase : Any = do_rescale
lowerCAmelCase : Optional[Any] = rescale_factor
lowerCAmelCase : Dict = do_normalize
lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
lowerCAmelCase : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase : Optional[Any] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase : Any = int(shortest_edge / crop_pct )
lowerCAmelCase : Tuple = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : str = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Tuple:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
lowerCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Tuple = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase : Tuple = resample if resample is not None else self.resample
lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : int = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Any = image_std if image_std is not None else self.image_std
lowerCAmelCase : Dict = size if size is not None else self.size
lowerCAmelCase : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : str = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase : Any = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase : Optional[int] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase : Optional[Any] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase : List[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
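# Worked example (not part of the original snippet) of the crop_pct rule in
# the first resizing method above: with shortest_edge=224 and
# crop_pct=224/256, the image is first resized so its short side becomes
# int(224 / (224 / 256)) = 256 and is then center-cropped to 224 x 224;
# for shortest_edge >= 384 the image is resized (warped) to the target
# square directly, with no crop.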
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
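# Hedged usage sketch (not part of the original snippet): a rope_scaling value
# that passes _rope_scaling_validation above -- exactly two fields, a "type"
# of "linear" or "dynamic", and a "factor" strictly greater than 1.0. It is
# left commented out because the snippet's relative imports only resolve
# inside the transformers package.
# config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})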
| 693 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: List[str] = VideoToVideoSDPipeline
_UpperCamelCase: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
_UpperCamelCase: Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
_UpperCamelCase: Any = False
# No `output_type`.
_UpperCamelCase: Dict = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
lowerCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCAmelCase : str = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowerCAmelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCAmelCase : Any = CLIPTextModel(lowercase_ )
lowerCAmelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> List[str]:
# 3 frames
lowerCAmelCase : List[Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : str = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : Tuple = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : str = self.get_dummy_components()
lowerCAmelCase : Optional[int] = VideoToVideoSDPipeline(**lowercase_ )
lowerCAmelCase : str = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase : List[Any] = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase : Union[str, Any] = """np"""
lowerCAmelCase : Tuple = sd_pipe(**lowercase_ ).frames
lowerCAmelCase : List[str] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
lowerCAmelCase : List[Any] = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_ , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _snake_case ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _snake_case ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _snake_case ( self ) -> Tuple:
pass
def _snake_case ( self ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase : List[Any] = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase : Tuple = torch.randn((1, 10, 3, 1024, 576) , generator=lowercase_ )
lowerCAmelCase : List[str] = video.to("""cuda""" )
lowerCAmelCase : Optional[int] = """Spiderman is surfing"""
lowerCAmelCase : List[Any] = pipe(lowercase_ , video=lowercase_ , generator=lowercase_ , num_inference_steps=3 , output_type="""pt""" ).frames
        lowerCAmelCase : Optional[int] = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 693 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
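# Worked example (not in the source): with the defaults above, embed_dim=96 and
# depths=[2, 2, 6, 2] give num_layers=4, so the derived channel dimension after
# the last stage is hidden_size = 96 * 2 ** (4 - 1) = 768.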
| 693 | 1 |
def _UpperCAmelCase ( nums ):
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
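# Worked example (illustrative, not in the source): for nums = [1, 2, 3, 4] the
# average is 2.5, so the mean absolute deviation is
# (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.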
| 693 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
    def __init__( self , p_stop=0.0_1 , max_length=1000 ):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class _a ( unittest.TestCase ):
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_=False , lowercase_=True ) -> Tuple:
lowerCAmelCase : Any = [
BatchSamplerShard(lowercase_ , 2 , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
for i in range(2 )
]
lowerCAmelCase : Dict = [list(lowercase_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowercase_ ) for shard in batch_sampler_shards] , [len(lowercase_ ) for e in expected] )
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Optional[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
lowerCAmelCase : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
        # Check the shards when the dataset is not a round multiple of batch size but the
        # number of batches is a multiple of num_processes.
lowerCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
lowerCAmelCase : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
        # Check the shards when the dataset is not a round multiple of batch size and the
        # number of batches is not a multiple of num_processes.
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
lowerCAmelCase : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
lowerCAmelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Dict:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
lowerCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
lowerCAmelCase : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Tuple = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
lowerCAmelCase : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ )
def _snake_case ( self ) -> List[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowercase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
        # Check the shards when the dataset is not a round multiple of batch size but the
        # number of batches is a multiple of num_processes.
lowerCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
        # Check the shards when the dataset is not a round multiple of batch size and the
        # number of batches is not a multiple of num_processes.
lowerCAmelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : str = [[[0, 1]], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ )
lowerCAmelCase : str = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ )
def _snake_case ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowercase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : str = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
lowerCAmelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : int = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowerCAmelCase : Optional[Any] = [BatchSamplerShard(lowercase_ , 2 , lowercase_ , even_batches=lowercase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False , lowercase_=2 , lowercase_=False ) -> List[Any]:
random.seed(lowercase_ )
lowerCAmelCase : List[Any] = list(lowercase_ )
lowerCAmelCase : Optional[Any] = [
IterableDatasetShard(
lowercase_ , batch_size=lowercase_ , drop_last=lowercase_ , num_processes=lowercase_ , process_index=lowercase_ , split_batches=lowercase_ , )
for i in range(lowercase_ )
]
lowerCAmelCase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowercase_ )
iterable_dataset_lists.append(list(lowercase_ ) )
lowerCAmelCase : List[str] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowerCAmelCase : List[str] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
self.assertTrue(len(lowercase_ ) % shard_batch_size == 0 )
lowerCAmelCase : Optional[int] = []
for idx in range(0 , len(lowercase_ ) , lowercase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowercase_ ) < len(lowercase_ ):
reference += reference
self.assertListEqual(lowercase_ , reference[: len(lowercase_ )] )
def _snake_case ( self ) -> Optional[int]:
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Union[str, Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowercase_ )
lowerCAmelCase : List[Any] = SkipBatchSampler(lowercase_ , 2 )
self.assertListEqual(list(lowercase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
lowerCAmelCase : Union[str, Any] = skip_first_batches(lowercase_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowercase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowercase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _snake_case ( self ) -> Dict:
Accelerator()
lowerCAmelCase : Any = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowercase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowercase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
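# Standalone sketch (not in the source; uses only the APIs imported above):
# sharding one BatchSampler across two processes. Each shard takes every other
# whole batch, matching the expected outputs in the tests above.
if __name__ == "__main__":
    sampler = BatchSampler(range(8 ) , batch_size=2 , drop_last=False )
    shards = [BatchSamplerShard(sampler , 2 , i ) for i in range(2 )]
    print(list(shards[0] ) )  # [[0, 1], [4, 5]]
    print(list(shards[1] ) )  # [[2, 3], [6, 7]]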
| 693 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class _a ( snake_case_ ):
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 693 |
def nor_gate ( input_a ,input_b ):
    '''simple docstring'''
    return int(input_a == input_b == 0 )
def main ( ):
    '''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
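    # Quick check (illustrative, not in the source): NOR is the negation of OR,
    # so only the all-zero input yields 1.
    assert nor_gate(0 , 0 ) == 1
    assert nor_gate(0 , 1 ) == nor_gate(1 , 0 ) == nor_gate(1 , 1 ) == 0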
| 693 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : int ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =['PoolFormerFeatureExtractor']
lowerCAmelCase : List[str] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=3 , lowercase_=4 , lowercase_=2 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=36 , lowercase_=3 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=6 , lowercase_=6 , lowercase_=3 , lowercase_=4 , lowercase_=None , lowercase_=1000 , ) -> str:
lowerCAmelCase : str = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : Union[str, Any] = text_seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : List[Any] = use_token_type_ids
lowerCAmelCase : Optional[Any] = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = max_position_embeddings
lowerCAmelCase : Any = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : List[str] = coordinate_size
lowerCAmelCase : Union[str, Any] = shape_size
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : List[Any] = scope
lowerCAmelCase : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase : str = text_seq_length
lowerCAmelCase : int = (image_size // patch_size) ** 2 + 1
lowerCAmelCase : int = self.text_seq_length + self.image_seq_length
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase : List[Any] = bbox[i, j, 3]
lowerCAmelCase : Optional[Any] = bbox[i, j, 1]
lowerCAmelCase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase : List[Any] = bbox[i, j, 2]
lowerCAmelCase : Optional[Any] = bbox[i, j, 0]
lowerCAmelCase : Tuple = t
lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : List[str] = None
if self.use_input_mask:
lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCAmelCase : int = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCAmelCase : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
lowerCAmelCase : int = LayoutLMvaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# text + image
lowerCAmelCase : Any = model(lowercase_ , pixel_values=lowercase_ )
lowerCAmelCase : Optional[int] = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
lowerCAmelCase : int = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_ )
lowerCAmelCase : Union[str, Any] = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase : str = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase : str = model(pixel_values=lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[str] = LayoutLMvaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : List[Any] = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = self.num_labels
lowerCAmelCase : Optional[Any] = LayoutLMvaForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str:
lowerCAmelCase : Any = LayoutLMvaForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Dict = False
_UpperCamelCase: Optional[int] = False
_UpperCamelCase: List[Any] = False
_UpperCamelCase: Tuple = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase: int = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Dict = LayoutLMvaModelTester(self )
lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_=False ) -> Tuple:
lowerCAmelCase : List[str] = copy.deepcopy(lowercase_ )
if model_class in get_values(lowercase_ ):
lowerCAmelCase : Any = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowercase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_ ):
lowerCAmelCase : str = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
elif model_class in get_values(lowercase_ ):
lowerCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
elif model_class in [
*get_values(lowercase_ ),
]:
lowerCAmelCase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
elif model_class in [
*get_values(lowercase_ ),
]:
lowerCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
return inputs_dict
def _snake_case ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase : Optional[Any] = type
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def _snake_case ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Any = LayoutLMvaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _a ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> int:
return LayoutLMvaImageProcessor(apply_ocr=lowercase_ ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> int:
lowerCAmelCase : int = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(lowercase_ )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : str = prepare_img()
lowerCAmelCase : Any = image_processor(images=lowercase_ , return_tensors="""pt""" ).pixel_values.to(lowercase_ )
lowerCAmelCase : str = torch.tensor([[1, 2]] )
lowerCAmelCase : str = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase : Union[str, Any] = model(
input_ids=input_ids.to(lowercase_ ) , bbox=bbox.to(lowercase_ ) , pixel_values=pixel_values.to(lowercase_ ) , )
# verify the logits
lowerCAmelCase : int = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowercase_ )
lowerCAmelCase : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ) )
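# Hedged helper sketch (not in the source): LayoutLM-style models expect bounding
# boxes on a 0-1000 normalized scale, so a helper along these lines is commonly
# used to turn pixel coordinates into valid `bbox` inputs.
def normalize_bbox(bbox , width , height ):
    return [
        int(1000 * bbox[0] / width ),
        int(1000 * bbox[1] / height ),
        int(1000 * bbox[2] / width ),
        int(1000 * bbox[3] / height ),
    ]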
| 693 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars ( ):
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character ( ):
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={'vocab_file': 'vocab.txt'}
lowerCAmelCase : int ={
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
lowerCAmelCase : Dict ={
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
lowerCAmelCase : List[Any] ={
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
_UpperCamelCase: Dict = VOCAB_FILES_NAMES
_UpperCamelCase: Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: Dict = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: Any = ConvBertTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , lowercase_=True , lowercase_=None , **lowercase_ , ) -> List[Any]:
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
lowerCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase_ ) != tokenize_chinese_chars
):
lowerCAmelCase : Dict = getattr(lowercase_ , normalizer_state.pop("""type""" ) )
lowerCAmelCase : List[str] = do_lower_case
lowerCAmelCase : Dict = strip_accents
lowerCAmelCase : Tuple = tokenize_chinese_chars
lowerCAmelCase : List[Any] = normalizer_class(**lowercase_ )
lowerCAmelCase : Tuple = do_lower_case
def _snake_case ( self , lowercase_ , lowercase_=None ) -> List[Any]:
lowerCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> List[int]:
lowerCAmelCase : Dict = [self.sep_token_id]
lowerCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
lowerCAmelCase : Any = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
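# Usage sketch (illustrative, not in the source; assumes the Hub checkpoint
# above is reachable). `_a` is the fast tokenizer class defined in this file.
if __name__ == "__main__":
    tokenizer = _a.from_pretrained("""YituTech/conv-bert-base""" )
    # Pairs come back as [CLS] A [SEP] B [SEP], matching
    # build_inputs_with_special_tokens / create_token_type_ids_from_sequences above.
    print(tokenizer("""hello""" , """world""" )["""input_ids"""] )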
| 693 |
# Imports
import numpy as np
class _a :
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> List[Any]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Union[str, Any]:
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Optional[int] = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Union[str, Any] = nir
return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Tuple = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
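
# Standalone usage sketch (not part of the original file). The constructor and
# the real method names are hidden by the renaming above, so this computes NDVI
# directly from two hypothetical band arrays instead of calling the class.
import numpy as np

red = np.array([[0.2, 0.3], [0.4, 0.5]])
nir = np.array([[0.6, 0.7], [0.8, 0.9]])
ndvi = (nir - red) / (nir + red)  # same formula as the first method shown above
print(ndvi)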
| 693 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
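
# Offline sketch of the aggregation step: the reducers only need records of the
# form [error_line, error, failed_test, job_link], so they can be exercised
# without hitting the GitHub API. Function names follow the call sites in the
# __main__ block (the obfuscated `def` lines above hide them), and this assumes
# the helpers behave like their unobfuscated originals.
toy_errors = [
    ["test_x.py:10", "AssertionError: 1 != 2", "tests/models/bert/test_modeling_bert.py::test_x", None],
    ["test_y.py:20", "AssertionError: 1 != 2", "tests/models/gpt2/test_modeling_gpt2.py::test_y", None],
    ["test_z.py:30", "ImportError: no module named foo", "tests/models/bert/test_modeling_bert.py::test_z", None],
]
print(make_github_table(reduce_by_error(toy_errors)))
print(make_github_table_per_model(reduce_by_model(toy_errors)))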
| 693 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : int ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Optional[Any] = "align_text_model"
def __init__( self , lowercase_=30522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=True , **lowercase_ , ) -> Any:
super().__init__(**lowercase_ )
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Optional[int] = position_embedding_type
lowerCAmelCase : int = use_cache
lowerCAmelCase : Tuple = pad_token_id
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase , lowerCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCAmelCase : Dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: Dict = "align_vision_model"
def __init__( self , lowercase_ = 3 , lowercase_ = 600 , lowercase_ = 2.0 , lowercase_ = 3.1 , lowercase_ = 8 , lowercase_ = [3, 3, 5, 3, 5, 5, 3] , lowercase_ = [32, 16, 24, 40, 80, 112, 192] , lowercase_ = [16, 24, 40, 80, 112, 192, 320] , lowercase_ = [] , lowercase_ = [1, 2, 2, 2, 1, 2, 1] , lowercase_ = [1, 2, 2, 3, 3, 4, 1] , lowercase_ = [1, 6, 6, 6, 6, 6, 6] , lowercase_ = 0.2_5 , lowercase_ = "swish" , lowercase_ = 2560 , lowercase_ = "mean" , lowercase_ = 0.0_2 , lowercase_ = 0.0_0_1 , lowercase_ = 0.9_9 , lowercase_ = 0.2 , **lowercase_ , ) -> List[Any]:
super().__init__(**lowercase_ )
lowerCAmelCase : List[Any] = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = width_coefficient
lowerCAmelCase : List[str] = depth_coefficient
lowerCAmelCase : Optional[Any] = depth_divisor
lowerCAmelCase : Dict = kernel_sizes
lowerCAmelCase : Union[str, Any] = in_channels
lowerCAmelCase : Optional[Any] = out_channels
lowerCAmelCase : Optional[Any] = depthwise_padding
lowerCAmelCase : Union[str, Any] = strides
lowerCAmelCase : Union[str, Any] = num_block_repeats
lowerCAmelCase : Optional[Any] = expand_ratios
lowerCAmelCase : List[str] = squeeze_expansion_ratio
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dim
lowerCAmelCase : Any = pooling_type
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : List[str] = batch_norm_eps
lowerCAmelCase : Optional[int] = batch_norm_momentum
lowerCAmelCase : Tuple = drop_connect_rate
lowerCAmelCase : Optional[Any] = sum(lowercase_ ) * 4
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase , lowerCAmelCase : List[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCAmelCase : Union[str, Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: Any = "align"
_UpperCamelCase: Any = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=640 , lowercase_=1.0 , lowercase_=0.0_2 , **lowercase_ , ) -> List[str]:
super().__init__(**lowercase_ )
if text_config is None:
lowerCAmelCase : Optional[Any] = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
lowerCAmelCase : List[Any] = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
lowerCAmelCase : Any = AlignTextConfig(**lowercase_ )
lowerCAmelCase : Dict = AlignVisionConfig(**lowercase_ )
lowerCAmelCase : Any = projection_dim
lowerCAmelCase : int = temperature_init_value
lowerCAmelCase : Optional[Any] = initializer_range
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ , **lowercase_ ) -> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Dict = copy.deepcopy(self.__dict__ )
lowerCAmelCase : List[str] = self.text_config.to_dict()
lowerCAmelCase : List[Any] = self.vision_config.to_dict()
lowerCAmelCase : Any = self.__class__.model_type
return output
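
# Hedged usage sketch, assuming these classes correspond to transformers'
# AlignTextConfig / AlignVisionConfig / AlignConfig (the `_a` class names above
# are an obfuscation artifact).
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig(num_hidden_layers=4, hidden_size=256)
vision_config = AlignVisionConfig()
config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=512)
print(config.text_config.hidden_size, config.projection_dim)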
| 693 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
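
# Illustrative note on the lazy-import pattern above: importing the package is
# cheap because `_LazyModule` defers the heavy torch-backed imports until an
# attribute is first touched.
import transformers.models.autoformer as autoformer  # only the lazy proxy is built here

config_cls = autoformer.AutoformerConfig  # first access triggers the real import
print(config_cls.model_type)  # 'autoformer'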
| 693 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
@require_torch
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[int] = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
lowerCAmelCase : Optional[int] = load_dataset("""ashraq/esc50""" )
lowerCAmelCase : Tuple = dataset["""train"""]["""audio"""][-1]["""array"""]
lowerCAmelCase : Tuple = audio_classifier(lowercase_ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(lowercase_ ) , [{"""score""": 0.5_0_1, """label""": """Sound of a dog"""}, {"""score""": 0.4_9_9, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def _snake_case ( self ) -> List[str]:
pass
@slow
@require_torch
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[Any] = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
lowerCAmelCase : Dict = load_dataset("""ashraq/esc50""" )
lowerCAmelCase : Optional[int] = dataset["""train"""]["""audio"""][-1]["""array"""]
lowerCAmelCase : str = audio_classifier(lowercase_ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(lowercase_ ) , [
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
] , )
lowerCAmelCase : Optional[int] = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
lowerCAmelCase : List[Any] = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def _snake_case ( self ) -> Optional[int]:
pass
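
# Standalone version of the fast test above (tiny checkpoint with random
# weights, so the scores are meaningless; the silent waveform is a placeholder).
import numpy as np
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
print(classifier(waveform, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))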
| 693 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
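
# Hedged sketch, assuming this class corresponds to transformers' DetrConfig:
# 200 object queries and a dilated C5 stage, everything else at the defaults.
from transformers import DetrConfig

config = DetrConfig(num_queries=200, dilation=True)
# attribute_map above aliases these onto encoder_attention_heads / d_model:
print(config.num_attention_heads, config.hidden_size)  # 8 256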
| 693 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 693 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 1 |
from math import sqrt
def sum_of_divisors(n):
    '''simple docstring'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit=10_000):
    '''simple docstring'''
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
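
# Worked check (illustrative, runs after the prompt above when executed as a
# script): 220 and 284 form the classic amicable pair, and perfect numbers are
# excluded by the `sum_of_divisors(i) != i` guard.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(300) == 220 + 284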
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
    def max_position_embeddings( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase : Union[str, Any] =logging.getLogger(__name__)
@dataclass
class _a :
_UpperCamelCase: Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_UpperCamelCase: Optional[int] = field(
default=snake_case_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCamelCase: Optional[int] = field(
default=snake_case_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_UpperCamelCase: Optional[int] = field(
default=snake_case_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class _a :
_UpperCamelCase: str = field(
default=snake_case_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase: str = field(
default=snake_case_ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Train language if it is different from the evaluation language."} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase: Optional[bool] = field(
default=snake_case_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCamelCase: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" ,SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase : List[str] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase : Any = load_dataset(
"""xnli""" ,model_args.language ,split="""train""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
lowerCAmelCase : Tuple = load_dataset(
"""xnli""" ,model_args.train_language ,split="""train""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Union[str, Any] = train_dataset.features["""label"""].names
if training_args.do_eval:
lowerCAmelCase : List[Any] = load_dataset(
"""xnli""" ,model_args.language ,split="""validation""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Optional[Any] = eval_dataset.features["""label"""].names
if training_args.do_predict:
lowerCAmelCase : List[Any] = load_dataset(
"""xnli""" ,model_args.language ,split="""test""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Tuple = predict_dataset.features["""label"""].names
# Labels
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=SCREAMING_SNAKE_CASE__ ,idalabel={str(SCREAMING_SNAKE_CASE__ ): label for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,finetuning_task="""xnli""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase : Any = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase : Any = False
def preprocess_function(SCREAMING_SNAKE_CASE__ ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] ,examples["""hypothesis"""] ,padding=SCREAMING_SNAKE_CASE__ ,max_length=data_args.max_seq_length ,truncation=SCREAMING_SNAKE_CASE__ ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase : List[Any] = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_train_samples )
lowerCAmelCase : List[str] = train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
lowerCAmelCase : Union[str, Any] = train_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on train dataset""" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ) ,3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase : Optional[int] = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_eval_samples )
lowerCAmelCase : Union[str, Any] = eval_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
lowerCAmelCase : Union[str, Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on validation dataset""" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase : Optional[int] = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_predict_samples )
lowerCAmelCase : Dict = predict_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
lowerCAmelCase : Tuple = predict_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on prediction dataset""" ,)
# Get the metric function
lowerCAmelCase : str = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary mapping strings to floats.
def compute_metrics(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = p.predictions[0] if isinstance(p.predictions ,SCREAMING_SNAKE_CASE__ ) else p.predictions
lowerCAmelCase : str = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ ,references=p.label_ids )
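# For XNLI the metric is accuracy, so this typically returns e.g. {"accuracy": 0.83} (value illustrative).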
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase : Optional[Any] = default_data_collator
elif training_args.fp16:
lowerCAmelCase : Tuple = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=8 )
else:
lowerCAmelCase : Dict = None
# Initialize our Trainer
lowerCAmelCase : List[Any] = Trainer(
model=SCREAMING_SNAKE_CASE__ ,args=SCREAMING_SNAKE_CASE__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,data_collator=SCREAMING_SNAKE_CASE__ ,)
# Training
if training_args.do_train:
lowerCAmelCase : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase : str = last_checkpoint
lowerCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = train_result.metrics
lowerCAmelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowerCAmelCase : Tuple = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""train""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase : Union[str, Any] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("""eval""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""eval""" ,SCREAMING_SNAKE_CASE__ )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = trainer.predict(SCREAMING_SNAKE_CASE__ ,metric_key_prefix="""predict""" )
lowerCAmelCase : List[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("""predict""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""predict""" ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
lowerCAmelCase : Optional[int] = os.path.join(training_args.output_dir ,"""predictions.txt""" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 693 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
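# Note: `scheduler_output - scheduler_output` below is identically zero, so this
# dummy pipeline deterministically returns a tensor of ones (a stub for testing).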
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
| 693 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if (ksize % 2) == 0:
lowerCAmelCase : int = ksize + 1
lowerCAmelCase : Optional[int] = np.zeros((ksize, ksize) ,dtype=np.floataa )
# each value
for y in range(SCREAMING_SNAKE_CASE__ ):
for x in range(SCREAMING_SNAKE_CASE__ ):
# distance from center
lowerCAmelCase : str = x - ksize // 2
lowerCAmelCase : Union[str, Any] = y - ksize // 2
# degrees to radians
lowerCAmelCase : Tuple = theta / 1_8_0 * np.pi
lowerCAmelCase : Union[str, Any] = np.cos(_theta )
lowerCAmelCase : Union[str, Any] = np.sin(_theta )
# get kernel x
lowerCAmelCase : str = cos_theta * px + sin_theta * py
# get kernel y
lowerCAmelCase : int = -sin_theta * px + cos_theta * py
# fill kernel
lowerCAmelCase : Optional[int] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
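# For reference, the loop above fills in the standard real-valued Gabor function
#   g(x', y') = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel offsets rotated by theta.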
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowerCAmelCase : Tuple =imread('../image_data/lena.jpg')
# turn image in gray scale value
lowerCAmelCase : Optional[int] =cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowerCAmelCase : Optional[Any] =np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowerCAmelCase : List[str] =gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_aa)
lowerCAmelCase : str =out / out.max() * 255
lowerCAmelCase : Tuple =out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 693 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCAmelCase : Union[str, Any] =HfArgumentParser(InitializationArguments)
lowerCAmelCase : Any =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCAmelCase : Any ={
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
lowerCAmelCase : str =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCAmelCase : Any =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 693 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = [1]
for i in range(2 ,SCREAMING_SNAKE_CASE__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
lowerCAmelCase : List[Any] = []
lowerCAmelCase : Any = list(range(SCREAMING_SNAKE_CASE__ ) )
# Find permutation
while factorials:
lowerCAmelCase : Dict = factorials.pop()
lowerCAmelCase , lowerCAmelCase : Tuple = divmod(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
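# Worked example (illustrative): for n = 4, k = 5 the factorial-base digits of 5
# are 0, 2, 1, so the 5th (0-indexed) permutation of [0, 1, 2, 3] is [0, 3, 2, 1].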
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
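# Illustrative trace of the doubling-and-halving ("Russian peasant") scheme above:
#   13 * 5: b = 0b101, so res = 13 (from bit 0) + 52 (from bit 2) = 65;
#   the second helper runs the same loop with every addition reduced mod c.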
| 693 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a ( snake_case_ ):
_UpperCamelCase: Optional[Any] = "vivit"
def __init__( self , lowercase_=224 , lowercase_=32 , lowercase_=[2, 16, 16] , lowercase_=3 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_fast" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1e-06 , lowercase_=True , **lowercase_ , ) -> Union[str, Any]:
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : List[str] = num_frames
lowerCAmelCase : Optional[Any] = tubelet_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : Union[str, Any] = qkv_bias
super().__init__(**lowercase_ )
| 693 |
from math import factorial
class _a :
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = real
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = [1] * rank
else:
lowerCAmelCase : Any = rank
def __repr__( self ) -> int:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowercase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self , lowercase_ ) -> Tuple:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
lowerCAmelCase : int = self.duals.copy()
lowerCAmelCase : Tuple = other.duals.copy()
if len(lowercase_ ) > len(lowercase_ ):
o_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
elif len(lowercase_ ) < len(lowercase_ ):
s_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
lowerCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
_UpperCamelCase: List[Any] = __add__
def __sub__( self , lowercase_ ) -> Union[str, Any]:
return self + other * -1
def __mul__( self , lowercase_ ) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
_UpperCamelCase: str = __mul__
def __truediv__( self , lowercase_ ) -> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self , lowercase_ ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self , n ) -> str:
    if n < 0 or isinstance(n , float ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not callable(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCAmelCase : List[Any] = Dual(SCREAMING_SNAKE_CASE__ ,1 )
lowerCAmelCase : Optional[Any] = func(SCREAMING_SNAKE_CASE__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE__ )
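# Sketch of the idea: evaluating func on Dual(position, 1) propagates a truncated
# Taylor series, so the k-th derivative is duals[k - 1] * k!.  For the demo below,
# f(y) = y**6 at y = 9 gives f''(y) = 30 * y**4 = 196830.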
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return SCREAMING_SNAKE_CASE__**2 * SCREAMING_SNAKE_CASE__**4
print(differentiate(f, 9, 2))
| 693 | 1 |
from math import factorial
class _a :
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = real
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = [1] * rank
else:
lowerCAmelCase : Any = rank
def __repr__( self ) -> int:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowercase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self , lowercase_ ) -> Tuple:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
lowerCAmelCase : int = self.duals.copy()
lowerCAmelCase : Tuple = other.duals.copy()
if len(lowercase_ ) > len(lowercase_ ):
o_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
elif len(lowercase_ ) < len(lowercase_ ):
s_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
lowerCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
_UpperCamelCase: List[Any] = __add__
def __sub__( self , lowercase_ ) -> Union[str, Any]:
return self + other * -1
def __mul__( self , lowercase_ ) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
_UpperCamelCase: str = __mul__
def __truediv__( self , lowercase_ ) -> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self , lowercase_ ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self , n ) -> str:
    if n < 0 or isinstance(n , float ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not callable(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCAmelCase : List[Any] = Dual(SCREAMING_SNAKE_CASE__ ,1 )
lowerCAmelCase : Optional[Any] = func(SCREAMING_SNAKE_CASE__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return SCREAMING_SNAKE_CASE__**2 * SCREAMING_SNAKE_CASE__**4
print(differentiate(f, 9, 2))
| 693 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _a ( snake_case_ ):
_UpperCamelCase: Optional[int] = "visual_bert"
def __init__( self , lowercase_=30522 , lowercase_=768 , lowercase_=512 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=1e-12 , lowercase_=False , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ) -> Dict:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : Dict = visual_embedding_dim
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Dict = type_vocab_size
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Union[str, Any] = bypass_transformer
lowerCAmelCase : Union[str, Any] = special_visual_initialize
| 693 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase : Dict =logging.get_logger(__name__)
class _a ( snake_case_ ):
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 693 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
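# Worked example (p = 5): m = 2**5 - 1 = 31 and the loop runs p - 2 = 3 times:
#   4 -> 14 -> 194 % 31 = 8 -> 62 % 31 = 0, so 31 is reported prime.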
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 | 1 |
# Algorithm for the pigeonhole sorting
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = min(SCREAMING_SNAKE_CASE__ ) # min() finds the minimum value
lowerCAmelCase : Any = max(SCREAMING_SNAKE_CASE__ ) # max() finds the maximum value
lowerCAmelCase : Optional[Any] = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
lowerCAmelCase : List[Any] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
lowerCAmelCase : Dict = 0
for count in range(SCREAMING_SNAKE_CASE__ ):
while holes[count] > 0:
holes[count] -= 1
lowerCAmelCase : Optional[Any] = count + min_val
i += 1
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(SCREAMING_SNAKE_CASE__ )
print("""Sorted order is:""" ,""" """.join(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
main()
| 693 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 693 | 1 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase : List[str] ={'UserAgent': UserAgent().random}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = script.contents[0]
lowerCAmelCase : int = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _a :
def __init__( self , lowercase_ ) -> Tuple:
lowerCAmelCase : List[str] = f"""https://www.instagram.com/{username}/"""
lowerCAmelCase : str = self.get_json()
def _snake_case ( self ) -> dict:
lowerCAmelCase : Tuple = requests.get(self.url , headers=lowercase_ ).text
lowerCAmelCase : int = BeautifulSoup(lowercase_ , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
return f"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ) -> str:
return f"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def _snake_case ( self ) -> str:
return self.user_data["username"]
@property
def _snake_case ( self ) -> str:
return self.user_data["full_name"]
@property
def _snake_case ( self ) -> str:
return self.user_data["biography"]
@property
def _snake_case ( self ) -> str:
return self.user_data["business_email"]
@property
def _snake_case ( self ) -> str:
return self.user_data["external_url"]
@property
def _snake_case ( self ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _snake_case ( self ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _snake_case ( self ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _snake_case ( self ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _snake_case ( self ) -> bool:
return self.user_data["is_verified"]
@property
def _snake_case ( self ) -> bool:
return self.user_data["is_private"]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = "github" ):
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
lowerCAmelCase : int = InstagramUser(SCREAMING_SNAKE_CASE__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,SCREAMING_SNAKE_CASE__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : int =InstagramUser('github')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 693 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Any = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase : Dict = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
| 693 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working, simple example of using Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Union[str, Any] =16
lowerCAmelCase : Optional[Any] =32
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 1_6 ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase : List[Any] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(SCREAMING_SNAKE_CASE__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase : Union[str, Any] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase : Optional[Any] = datasets.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels', which is the name the models of the
# transformers library expect for labels
lowerCAmelCase : List[Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(SCREAMING_SNAKE_CASE__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase : Union[str, Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase : Union[str, Any] = 8
else:
lowerCAmelCase : Dict = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE__ ,padding="""longest""" ,max_length=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
lowerCAmelCase : List[Any] = DataLoader(
tokenized_datasets["""train"""] ,shuffle=SCREAMING_SNAKE_CASE__ ,collate_fn=SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=SCREAMING_SNAKE_CASE__ ,collate_fn=SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : Optional[int] =mocked_dataloaders # noqa: F811
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,SCREAMING_SNAKE_CASE__ ) == "1":
lowerCAmelCase : str = 2
# New Code #
lowerCAmelCase : List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase : Union[str, Any] = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase : Optional[Any] = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=SCREAMING_SNAKE_CASE__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase : Optional[int] = config["""lr"""]
lowerCAmelCase : int = int(config["""num_epochs"""] )
lowerCAmelCase : str = int(config["""seed"""] )
lowerCAmelCase : List[Any] = int(config["""batch_size"""] )
lowerCAmelCase : List[str] = evaluate.load("""glue""" ,"""mrpc""" )
set_seed(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase , lowerCAmelCase : Optional[int] = get_dataloaders(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=SCREAMING_SNAKE_CASE__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase : Optional[Any] = AdamW(params=model.parameters() ,lr=SCREAMING_SNAKE_CASE__ )
# Instantiate scheduler
lowerCAmelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ ,num_warmup_steps=1_0_0 ,num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ ):
model.train()
with LocalSGD(
accelerator=SCREAMING_SNAKE_CASE__ ,model=SCREAMING_SNAKE_CASE__ ,local_sgd_steps=SCREAMING_SNAKE_CASE__ ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase : str = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ ,references=SCREAMING_SNAKE_CASE__ ,)
lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=SCREAMING_SNAKE_CASE__ ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,)
parser.add_argument(
"""--local_sgd_steps""" ,type=SCREAMING_SNAKE_CASE__ ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
lowerCAmelCase : Optional[int] = parser.parse_args()
lowerCAmelCase : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 693 |
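# A readable, de-obfuscated sketch of the training-loop pattern above: gradient
# accumulation via `accelerator.accumulate` combined with `LocalSGD`, which only
# synchronizes model parameters every `local_sgd_steps` optimizer steps. The API
# calls mirror the snippet above; the helper name and defaults are illustrative.
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD
def train(model, optimizer, scheduler, train_loader, grad_accum_steps=1, local_sgd_steps=8, epochs=3):
    accelerator = Accelerator(gradient_accumulation_steps=grad_accum_steps)
    model, optimizer, train_loader, scheduler = accelerator.prepare(model, optimizer, train_loader, scheduler)
    for _ in range(epochs):
        model.train()
        with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=True) as local_sgd:
            for batch in train_loader:
                with accelerator.accumulate(model):  # skips gradient sync until the accumulation boundary
                    loss = model(**batch).loss
                    accelerator.backward(loss)
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()
                local_sgd.step()  # averages parameters across workers every `local_sgd_steps` steps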
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def is_in_circle(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> bool:
lowerCAmelCase : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowerCAmelCase : List[str] = mean(
int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) )
for _ in range(SCREAMING_SNAKE_CASE__ ) )
# The ratio of the area for circle to square is pi/4.
lowerCAmelCase : List[Any] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = 1.0 ,):
'''simple docstring'''
return mean(
function_to_integrate(uniform(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ) for _ in range(SCREAMING_SNAKE_CASE__ ) ) * (max_value - min_value)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = 1.0 ):
'''simple docstring'''
def identity_function(SCREAMING_SNAKE_CASE__ ) -> float:
return x
lowerCAmelCase : Union[str, Any] = area_under_curve_estimator(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print("""******************""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def function_to_integrate(SCREAMING_SNAKE_CASE__ ) -> float:
return sqrt(4.0 - x * x )
lowerCAmelCase : int = area_under_curve_estimator(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,0.0 ,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 |
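# A minimal, de-obfuscated version of the Monte Carlo estimators above: sample
# uniform points, average an indicator (or function values), and scale. Purely
# illustrative; it mirrors the logic of the snippet.
from random import uniform
from statistics import mean
def estimate_pi(num_samples: int) -> float:
    # P(point lands in the unit circle | point drawn from [-1, 1]^2) = pi / 4
    in_circle = mean(
        int(uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0)
        for _ in range(num_samples)
    )
    return in_circle * 4
print(estimate_pi(100_000))  # ~3.14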
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 1 |
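# The try/except ladder above registers optional backends in an import map so
# heavy modules are only imported on first attribute access. A minimal sketch of
# the same lazy-import idea via PEP 562 module-level __getattr__, assuming this
# code lives in a package's __init__.py (transformers' _LazyModule is a richer
# implementation of the pattern):
import importlib
_import_structure = {"tokenization_roformer": ["RoFormerTokenizer"]}
_attr_to_module = {a: m for m, attrs in _import_structure.items() for a in attrs}
def __getattr__(name):
    if name in _attr_to_module:
        submodule = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")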
from math import isqrt
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = False
return [i for i in range(2 ,SCREAMING_SNAKE_CASE__ ) if is_prime[i]]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_0**8 ):
'''simple docstring'''
lowerCAmelCase : str = calculate_prime_numbers(max_number // 2 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Tuple = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 693 |
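# De-obfuscated sketch of the counting logic above (Project Euler 187): sieve
# primes below N/2, then sweep two pointers to count pairs (p, q), p <= q, with
# p * q < N. The masked `range(i**2 ,... ,...)` in the sieve above is
# `range(i * i, max_number, i)`.
from math import isqrt
def primes_below(n: int) -> list:
    is_prime = [True] * n
    for i in range(2, isqrt(n - 1) + 1):
        if is_prime[i]:
            for j in range(i * i, n, i):
                is_prime[j] = False
    return [i for i in range(2, n) if is_prime[i]]
def count_semiprimes(max_number: int) -> int:
    primes = primes_below(max_number // 2)
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= max_number:
            right -= 1
        count += right - left + 1  # every q in primes[left..right] pairs with p = primes[left]
        left += 1
    return count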
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return int(input_a == input_a == 0 )
def _UpperCAmelCase ( ):
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 1 |
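# Under the identifier masking above, `int(input_a == input_a == 0)` collapses
# two distinct parameters into one name; the intended NOR logic (confirmed by
# the truth table printed above) is "both inputs are 0". A self-contained
# version, plus a small demonstration that NOR is functionally complete:
def nor_gate(a: int, b: int) -> int:
    return int(a == b == 0)
def not_gate(a: int) -> int:
    return nor_gate(a, a)
def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))
def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))
assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]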
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
lowerCAmelCase : Any ='\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class _a ( snake_case_ ):
@staticmethod
def _snake_case ( lowercase_ ) -> Dict:
lowerCAmelCase : int = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=lowercase_ , required=lowercase_ , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=lowercase_ , required=lowercase_ , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=lowercase_ , required=lowercase_ , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=lowercase_ , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=lowercase_ , default=lowercase_ , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=lowercase_ )
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , *lowercase_ , ) -> int:
lowerCAmelCase : str = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(f"""Loading model {model_type}""" )
lowerCAmelCase : str = model_type
lowerCAmelCase : str = tf_checkpoint
lowerCAmelCase : int = pytorch_dump_output
lowerCAmelCase : Union[str, Any] = config
lowerCAmelCase : List[str] = finetuning_task_name
def _snake_case ( self ) -> Dict:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowercase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase_ )
if "ckpt" in self._tf_checkpoint.lower():
lowerCAmelCase : Optional[int] = self._tf_checkpoint
lowerCAmelCase : Tuple = """"""
else:
lowerCAmelCase : Tuple = self._tf_checkpoint
lowerCAmelCase : List[str] = """"""
convert_transfo_xl_checkpoint_to_pytorch(
lowercase_ , self._config , self._pytorch_dump_output , lowercase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 693 |
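# The class above follows the usual argparse sub-command pattern: a static
# registration hook adds the parser, `set_defaults(func=...)` binds a factory,
# and `run` dispatches on the model type. A minimal self-contained sketch of
# that pattern (names and the trivial `run` body are illustrative):
import argparse
class ConvertCommand:
    @staticmethod
    def register_subcommand(subparsers):
        p = subparsers.add_parser("convert", help="Convert a checkpoint.")
        p.add_argument("--model_type", required=True)
        p.set_defaults(func=lambda args: ConvertCommand(args.model_type))
    def __init__(self, model_type):
        self.model_type = model_type
    def run(self):
        print(f"would convert a {self.model_type} checkpoint")
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
ConvertCommand.register_subcommand(subparsers)
args = parser.parse_args(["convert", "--model_type", "bert"])
args.func(args).run()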
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : int ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =['PoolFormerFeatureExtractor']
lowerCAmelCase : List[str] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 693 |
import os
import string
import sys
lowerCAmelCase : Optional[int] =1 << 8
lowerCAmelCase : List[Any] ={
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
lowerCAmelCase : Optional[Any] =KEYMAP['up']
lowerCAmelCase : Tuple =KEYMAP['left']
if sys.platform == "win32":
lowerCAmelCase : Dict =[]
lowerCAmelCase : int ={
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCAmelCase : Optional[Any] =ord(str(i))
def _UpperCAmelCase ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase : Any = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE__ ) == 0:
# Read the keystroke
lowerCAmelCase : int = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase : str = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE__ )
if ord(SCREAMING_SNAKE_CASE__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
lowerCAmelCase : Optional[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase : Optional[int] = cha[1]
else:
lowerCAmelCase : Any = ch.decode(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Optional[int] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase : List[Any] = sys.stdin.fileno()
lowerCAmelCase : str = termios.tcgetattr(SCREAMING_SNAKE_CASE__ )
try:
tty.setraw(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE__ ,termios.TCSADRAIN ,SCREAMING_SNAKE_CASE__ )
return ch
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE__ ) == KEYMAP["esc"]:
lowerCAmelCase : int = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) == KEYMAP["mod_int"]:
lowerCAmelCase : Tuple = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 693 | 1 |
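# A POSIX-only sketch of the raw-mode read at the core of the helper above:
# switch the TTY to raw mode, read one byte, and always restore the saved
# settings. (The full helper above additionally decodes Windows scan codes and
# ANSI escape sequences for arrow keys.)
import sys
import termios
import tty
def read_one_char() -> str:
    fd = sys.stdin.fileno()
    saved = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        return sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, saved)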
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 693 |
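# Both helpers above implement binary ("Russian peasant") multiplication: scan
# b's bits from least significant; whenever a bit is set, add the current
# power-of-two multiple of a. Worked example for 13 * 11 (11 = 0b1011):
# res = 13 + 26 + 104 = 143.
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a   # a doubles each round: a, 2a, 4a, ...
        b >>= 1  # consume the lowest bit of b
    return res
assert binary_multiply(13, 11) == 143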
# Imports
import numpy as np
class _a :
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> List[Any]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Union[str, Any]:
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Optional[int] = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Union[str, Any] = nir
return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Tuple = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 693 | 1 |
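# Typical use of the index calculator above: pass per-band arrays, then request
# an index by name through the funcs lookup shown above. A tiny stand-alone
# NDVI check with made-up reflectance values (the formula matches the ndvi
# method above):
import numpy as np
red = np.array([0.2, 0.3])
nir = np.array([0.6, 0.5])
ndvi = (nir - red) / (nir + red)
print(ndvi)  # [0.5  0.25]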
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = []
lowerCAmelCase : Tuple = 1
while len(SCREAMING_SNAKE_CASE__ ) < 1e6:
constant.append(str(SCREAMING_SNAKE_CASE__ ) )
i += 1
lowerCAmelCase : Tuple = """""".join(SCREAMING_SNAKE_CASE__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 693 |
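# De-obfuscated sketch of the Project Euler 40 solution above: concatenate the
# positive integers into Champernowne's constant and multiply the digits at the
# 1-indexed positions 1, 10, 100, ..., 1_000_000. Integers up to 199_999 already
# supply more than 10**6 digits.
def champernowne_product() -> int:
    digits = "".join(str(i) for i in range(1, 200_000))
    product = 1
    for power in range(7):  # positions 10**0 through 10**6
        product *= int(digits[10**power - 1])
    return product
print(champernowne_product())  # 210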
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 693 | 1 |
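# The two fetchers above share one pagination pattern: request page 1, read
# `total_count`, then walk the remaining pages at 100 items each. A compact
# generic sketch (the endpoint shape and `total_count` field mirror the snippet;
# the helper name and the absence of error handling are illustrative):
import math
import requests
def fetch_all(url: str, key: str, headers=None) -> list:
    first = requests.get(url, headers=headers).json()
    items = list(first[key])
    extra_pages = math.ceil((first["total_count"] - 100) / 100)
    for i in range(extra_pages):
        page = requests.get(url + f"&page={i + 2}", headers=headers).json()
        items.extend(page[key])
    return items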
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _a :
def __init__( self , lowercase_ , lowercase_=99 , lowercase_=13 , lowercase_=7 , lowercase_=9 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_=8 , lowercase_=0.1 , lowercase_=0.0_0_2 , lowercase_=1 , lowercase_=0 , lowercase_=0 , lowercase_=None , lowercase_=None , ) -> str:
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : Any = encoder_seq_length
lowerCAmelCase : Optional[Any] = decoder_seq_length
# For common tests
lowerCAmelCase : str = self.decoder_seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = use_attention_mask
lowerCAmelCase : Optional[Any] = use_labels
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Tuple = d_ff
lowerCAmelCase : Tuple = relative_attention_num_buckets
lowerCAmelCase : Optional[Any] = dropout_rate
lowerCAmelCase : Optional[int] = initializer_factor
lowerCAmelCase : str = eos_token_id
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : List[Any] = decoder_start_token_id
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Tuple = decoder_layers
def _snake_case ( self ) -> List[Any]:
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> str:
if attention_mask is None:
lowerCAmelCase : List[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase_ )
if decoder_head_mask is None:
lowerCAmelCase : List[Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase_ )
if cross_attn_head_mask is None:
lowerCAmelCase : str = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowercase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase : Optional[int] = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase : int = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase : Dict = self.get_config()
lowerCAmelCase : Tuple = config.num_attention_heads
lowerCAmelCase : str = self.prepare_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, input_dict
def _snake_case ( self ) -> Any:
lowerCAmelCase , lowerCAmelCase : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self ) -> str:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self ) -> Optional[int]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Dict:
lowerCAmelCase : Optional[int] = UMTaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : str = model(
input_ids=lowercase_ , decoder_input_ids=lowercase_ , attention_mask=lowercase_ , decoder_attention_mask=lowercase_ , )
lowerCAmelCase : int = model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
lowerCAmelCase : Any = result.last_hidden_state
lowerCAmelCase : Union[str, Any] = result.past_key_values
lowerCAmelCase : List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowercase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
lowerCAmelCase : Optional[Any] = UMTaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval()
# first forward pass
lowerCAmelCase : Optional[int] = model(lowercase_ , use_cache=lowercase_ )
lowerCAmelCase : Optional[Any] = model(lowercase_ )
lowerCAmelCase : List[Any] = model(lowercase_ , use_cache=lowercase_ )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) + 1 )
lowerCAmelCase , lowerCAmelCase : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : Tuple = model(lowercase_ )["""last_hidden_state"""]
lowerCAmelCase : Optional[int] = model(lowercase_ , past_key_values=lowercase_ )["""last_hidden_state"""]
# select random slice
lowerCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : str = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def _snake_case ( self , lowercase_ , lowercase_ , ) -> Optional[int]:
lowerCAmelCase : Optional[Any] = UMTaModel(config=lowercase_ ).to(lowercase_ ).half().eval()
lowerCAmelCase : Union[str, Any] = model(**lowercase_ )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(lowercase_ ).any().item() )
@require_torch
class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Any = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_UpperCamelCase: List[str] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_UpperCamelCase: Union[str, Any] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_UpperCamelCase: Any = True
_UpperCamelCase: List[str] = False
_UpperCamelCase: str = False
_UpperCamelCase: Optional[int] = True
_UpperCamelCase: Dict = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_UpperCamelCase: int = [0.8, 0.9]
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Union[str, Any] = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : List[Any] = UMTaModel(config_and_inputs[0] ).to(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowercase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=lowercase_ , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self ) -> int:
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowercase_ )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : List[Any] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = config_and_inputs[0]
lowerCAmelCase : int = UMTaForConditionalGeneration(lowercase_ ).eval()
model.to(lowercase_ )
lowerCAmelCase : Union[str, Any] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=lowercase_ ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase_ ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase_ ),
}
for attn_name, (name, mask) in zip(lowercase_ , head_masking.items() ):
lowerCAmelCase : Optional[Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase : Tuple = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowercase_ )
lowerCAmelCase : Any = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=lowercase_ , return_dict_in_generate=lowercase_ , **lowercase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase : Dict = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self ) -> str:
lowerCAmelCase : Union[str, Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=lowercase_ ).to(lowercase_ )
lowerCAmelCase : int = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=lowercase_ , legacy=lowercase_ )
lowerCAmelCase : List[str] = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowerCAmelCase : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" , padding=lowercase_ ).input_ids
# fmt: off
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowercase_ , lowercase_ )
lowerCAmelCase : Optional[int] = model.generate(input_ids.to(lowercase_ ) )
lowerCAmelCase : List[str] = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowerCAmelCase : Dict = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 693 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
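# Expose the public names lazily: submodules are only imported on first access,
# while static type checkers follow the real imports under TYPE_CHECKING.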
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
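    # Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1, i.e. each
    # term is the product of all previous terms plus one (2, 3, 7, 43, 1807, ...).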
assert isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ), F"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
lowerCAmelCase : str = F"""The input value of [n={number}] has to be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : int = sylvester(number - 1 )
lowerCAmelCase : Optional[int] = num - 1
lowerCAmelCase : int = num
return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 693 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
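        # absolute tolerance used when validating the exported ONNX model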
return 1e-5
@property
def _snake_case ( self ) -> int:
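        # default ONNX opset version targeted by the export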
return 12
| 693 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _a :
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.0_2 , lowercase_=3 , lowercase_=None , lowercase_=2 , ) -> Optional[Any]:
lowerCAmelCase : Dict = parent
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : List[str] = image_size
lowerCAmelCase : Dict = patch_size
lowerCAmelCase : str = num_channels
lowerCAmelCase : Tuple = is_training
lowerCAmelCase : Dict = use_labels
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : str = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Any = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Tuple = scope
lowerCAmelCase : Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCAmelCase : Any = (image_size // patch_size) ** 2
lowerCAmelCase : Tuple = num_patches + 2
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : int = None
if self.use_labels:
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Tuple = TFDeiTModel(config=lowercase_ )
lowerCAmelCase : Dict = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
lowerCAmelCase : List[str] = TFDeiTForMaskedImageModeling(config=lowercase_ )
lowerCAmelCase : str = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase : int = 1
lowerCAmelCase : Any = TFDeiTForMaskedImageModeling(lowercase_ )
lowerCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
lowerCAmelCase : Optional[int] = self.type_sequence_label_size
lowerCAmelCase : Tuple = TFDeiTForImageClassification(lowercase_ )
lowerCAmelCase : int = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : List[str] = 1
lowerCAmelCase : Union[str, Any] = TFDeiTForImageClassification(lowercase_ )
lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : str = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: int = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCamelCase: Dict = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCamelCase: List[Any] = False
_UpperCamelCase: Optional[Any] = False
_UpperCamelCase: Tuple = False
_UpperCamelCase: Union[str, Any] = False
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = TFDeiTModelTester(self )
lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def _snake_case ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : int = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Dense ) )
def _snake_case ( self ) -> int:
lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = model_class(lowercase_ )
lowerCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def _snake_case ( self ) -> int:
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_=False ) -> List[str]:
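        # TFDeiTForImageClassificationWithTeacher does not accept `labels`, so drop
        # them whenever the model's call signature has no such argument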
lowerCAmelCase : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _snake_case ( self ) -> int:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[Any] = TFDeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Tuple:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase : List[Any] = self.default_image_processor
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : Tuple = image_processor(images=lowercase_ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase : Any = model(**lowercase_ )
# verify the logits
lowerCAmelCase : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase : Optional[int] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
| 693 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
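        # Build a CLI invocation of finetune_rag.py over the dummy data, run it in a
        # subprocess, and read back the metrics file it writes.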
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_0_0_0_0_0_0 ):
'''simple docstring'''
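    # With x = a + d, y = a, z = a - d in arithmetic progression,
    # x**2 - y**2 - z**2 = n reduces to n = a * (4*d - a), hence d = (a + n/a) / 4.
    # We count the n below `limit` that admit exactly ten such solutions
    # (Project Euler problem 135).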
lowerCAmelCase : Optional[Any] = limit + 1
lowerCAmelCase : Dict = [0] * limit
for first_term in range(1 ,SCREAMING_SNAKE_CASE__ ):
for n in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Tuple = first_term + n / first_term
            if common_difference % 4:  # a + n/a must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d; n > 0 requires a < 4*d
lowerCAmelCase : List[str] = sum(1 for x in frequency[1:limit] if x == 1_0 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
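        # `cutoffs` partition the vocabulary into buckets for the adaptive
        # softmax / adaptive embeddings used by Transformer-XL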
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : List[Any] ={'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
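        # dummy post-processing used for testing: the expression below always yields
        # a tensor of ones with the same shape as the scheduler output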
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
| 693 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=1_0_0_0 ):
'''simple docstring'''
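    # Miller-Rabin probabilistic primality test: write n - 1 = d * 2**exp with d odd,
    # then for `prec` random bases b check whether b**d, b**(2d), ... ever reaches
    # n - 1 (mod n). Each round catches a composite with probability >= 3/4, so the
    # overall error is at most (1/4)**prec.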
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : Any = n - 1
lowerCAmelCase : Tuple = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
lowerCAmelCase : Optional[int] = 0
while count < prec:
lowerCAmelCase : Optional[int] = random.randint(2 ,n - 1 )
lowerCAmelCase : Union[str, Any] = bin_exp_mod(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if b != 1:
lowerCAmelCase : Tuple = True
for _ in range(SCREAMING_SNAKE_CASE__ ):
if b == n - 1:
lowerCAmelCase : int = False
break
lowerCAmelCase : Dict = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] =abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 693 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
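    # without an explicit timeout the simulated connection would hang indefinitely
    # (RequestWouldHangIndefinitelyError); passing timeout=1.0 surfaces instead as a
    # requests ConnectTimeout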
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
| 693 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=18 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , ) -> int:
lowerCAmelCase : Any = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase : int = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Tuple = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : Dict = apply_ocr
def _snake_case ( self ) -> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = LayoutLMvaImageProcessingTester(self )
@property
def _snake_case ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> str:
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """apply_ocr""" ) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> int:
# Initialize image_processing
lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , lowercase_ )
self.assertIsInstance(encoding.boxes , lowercase_ )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _snake_case ( self ) -> Tuple:
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : Optional[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _snake_case ( self ) -> List[str]:
# with apply_OCR = True
lowerCAmelCase : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase : str = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
lowerCAmelCase : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
lowerCAmelCase : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase_ )
self.assertListEqual(encoding.boxes , lowercase_ )
# with apply_OCR = False
lowerCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=lowercase_ )
lowerCAmelCase : Dict = image_processing(lowercase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 693 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
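        # Mirror the processor's shortest-edge resize: scale each image so its shorter
        # side equals size["shortest_edge"], preserving aspect ratio; for batched
        # inputs, return the maximum height/width across the batch (i.e. after padding).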
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase : List[Any] ='\\n Text data.\n Second line of data.'
lowerCAmelCase : Union[str, Any] ='file'
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
lowerCAmelCase : str = bytes(SCREAMING_SNAKE_CASE__ ,"""utf-8""" )
with zstd.open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir ,SCREAMING_SNAKE_CASE__ ) ,"""w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" ,["""gzip""", """xz""", """zstd"""] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
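    # extract each compressed fixture through cached_path and compare the result
    # with the uncompressed reference file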
lowerCAmelCase : Dict = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
lowerCAmelCase : Optional[int] = input_paths[compression_format]
lowerCAmelCase : Optional[Any] = tmp_path / """cache"""
lowerCAmelCase : List[str] = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE__ ,extract_compressed_file=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = cached_path(SCREAMING_SNAKE_CASE__ ,download_config=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
with open(SCREAMING_SNAKE_CASE__ ) as f:
lowerCAmelCase : Optional[int] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" ,[True, False] )
@pytest.mark.parametrize("""default_cache_dir""" ,[True, False] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = """custom_cache"""
lowerCAmelCase : Optional[int] = """custom_extracted_dir"""
lowerCAmelCase : str = tmp_path / """custom_extracted_path"""
if default_extracted:
lowerCAmelCase : int = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" ,SCREAMING_SNAKE_CASE__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" ,str(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowerCAmelCase : Tuple = xz_file
lowerCAmelCase : Optional[int] = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=SCREAMING_SNAKE_CASE__ )
)
lowerCAmelCase : Dict = cached_path(SCREAMING_SNAKE_CASE__ ,download_config=SCREAMING_SNAKE_CASE__ )
assert Path(SCREAMING_SNAKE_CASE__ ).parent.parts[-2:] == expected
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = str(Path(SCREAMING_SNAKE_CASE__ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE__ ) == text_file
# relative path
lowerCAmelCase : str = str(Path(SCREAMING_SNAKE_CASE__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE__ ) == text_file
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
cached_path(SCREAMING_SNAKE_CASE__ )
# relative path
lowerCAmelCase : List[Any] = """./__missing_file__.txt"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
cached_path(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(SCREAMING_SNAKE_CASE__ ) as f:
lowerCAmelCase : int = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( ):
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_get("""https://huggingface.co""" ,temp_file=SCREAMING_SNAKE_CASE__ )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
ftp_get("""ftp://huggingface.co""" ,temp_file=SCREAMING_SNAKE_CASE__ )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
fsspec_get("""s3://huggingface.co""" ,temp_file=SCREAMING_SNAKE_CASE__ )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
fsspec_head("""s3://huggingface.co""" )
| 693 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 693 | 1 |
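The two helpers above implement binary ("Russian peasant") multiplication, plain and modular. A minimal cleaned-up sketch with readable names, assuming non-negative integer operands:

def peasant_multiply(a: int, b: int) -> int:
    # shift-and-add multiplication: add the current a whenever the low bit of b is set
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a       # double a
        b >>= 1      # halve b
    return res

def peasant_multiply_mod(a: int, b: int, c: int) -> int:
    # same loop, but every addition is reduced modulo c
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res

assert peasant_multiply(19, 23) == 19 * 23
assert peasant_multiply_mod(19, 23, 7) == (19 * 23) % 7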
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase : Tuple =False
lowerCAmelCase : List[Any] =True
lowerCAmelCase : Optional[Any] =False
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCAmelCase : int =parser.parse_args()
lowerCAmelCase : int ={
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
lowerCAmelCase : Union[str, Any] ={
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
lowerCAmelCase : str ='' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
lowerCAmelCase : List[str] =reader.read()
lowerCAmelCase : List[Any] =json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
lowerCAmelCase : Any =UNetaDModel(**config)
else:
lowerCAmelCase : Optional[Any] =UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
lowerCAmelCase : Union[str, Any] =class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase : Any =dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase : int =config[key]
del config[key]
lowerCAmelCase : int =[k.replace('UNetRes', '') for k in config['down_block_types']]
lowerCAmelCase : List[Any] =[k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
lowerCAmelCase : List[Any] =torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
lowerCAmelCase : Optional[int] ={}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
lowerCAmelCase : str =False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
lowerCAmelCase : List[str] =param_value
lowerCAmelCase : Tuple =True
if not has_changed:
lowerCAmelCase : Optional[Any] =param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 693 |
from math import factorial
class _a :
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = real
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = [1] * rank
else:
lowerCAmelCase : Any = rank
def __repr__( self ) -> int:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowercase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self , lowercase_ ) -> Tuple:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
lowerCAmelCase : int = self.duals.copy()
lowerCAmelCase : Tuple = other.duals.copy()
if len(lowercase_ ) > len(lowercase_ ):
            o_dual.extend([0] * (len(lowercase_ ) - len(lowercase_ )) )
elif len(lowercase_ ) < len(lowercase_ ):
            s_dual.extend([0] * (len(lowercase_ ) - len(lowercase_ )) )
lowerCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
_UpperCamelCase: List[Any] = __add__
def __sub__( self , lowercase_ ) -> Union[str, Any]:
return self + other * -1
def __mul__( self , lowercase_ ) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
_UpperCamelCase: str = __mul__
def __truediv__( self , lowercase_ ) -> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self , lowercase_ ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self , lowercase_ ) -> str:
if n < 0 or isinstance(lowercase_ , lowercase_ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not callable(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCAmelCase : List[Any] = Dual(SCREAMING_SNAKE_CASE__ ,1 )
lowerCAmelCase : Optional[Any] = func(SCREAMING_SNAKE_CASE__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
| 693 | 1 |
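The `Dual` class above tracks a whole list of higher-order coefficients. As a minimal first-order sketch of the same idea, here is a dual number that recovers a single derivative through the product rule (the class shape and the lambda are illustrative, not the snippet's exact API):

class Dual:
    """Truncated dual number a + b*eps with eps**2 == 0."""
    def __init__(self, real: float, eps: float = 0.0) -> None:
        self.real, self.eps = real, eps
    def __add__(self, other):
        other = other if isinstance(other, Dual) else Dual(other)
        return Dual(self.real + other.real, self.eps + other.eps)
    __radd__ = __add__
    def __mul__(self, other):
        other = other if isinstance(other, Dual) else Dual(other)
        # (a + b*eps)(c + d*eps) = ac + (ad + bc)*eps  — the product rule
        return Dual(self.real * other.real, self.real * other.eps + self.eps * other.real)
    __rmul__ = __mul__
    def __pow__(self, n: int):
        out = Dual(1.0)
        for _ in range(n):
            out = out * self
        return out

def derivative(func, x: float) -> float:
    # seed the eps component with 1 and read the derivative back out
    return func(Dual(x, 1.0)).eps

print(derivative(lambda y: y ** 2 * y ** 4, 9.0))  # d/dy y**6 at 9 -> 6 * 9**5 = 354294.0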
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if is_torch_version("""<""" ,"""2.0.0""" ) or not hasattr(SCREAMING_SNAKE_CASE__ ,"""_dynamo""" ):
return False
return isinstance(SCREAMING_SNAKE_CASE__ ,torch._dynamo.eval_frame.OptimizedModule )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = True ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowerCAmelCase : List[str] = is_compiled_module(SCREAMING_SNAKE_CASE__ )
if is_compiled:
lowerCAmelCase : Optional[int] = model
lowerCAmelCase : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
lowerCAmelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ ,"""forward""" )
lowerCAmelCase : int = model.__dict__.pop("""_original_forward""" ,SCREAMING_SNAKE_CASE__ )
if original_forward is not None:
while hasattr(SCREAMING_SNAKE_CASE__ ,"""__wrapped__""" ):
lowerCAmelCase : str = forward.__wrapped__
if forward == original_forward:
break
lowerCAmelCase : Union[str, Any] = forward
if getattr(SCREAMING_SNAKE_CASE__ ,"""_converted_to_transformer_engine""" ,SCREAMING_SNAKE_CASE__ ):
convert_model(SCREAMING_SNAKE_CASE__ ,to_transformer_engine=SCREAMING_SNAKE_CASE__ )
if is_compiled:
lowerCAmelCase : Optional[Any] = model
lowerCAmelCase : List[Any] = compiled_model
return model
def _UpperCAmelCase ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
elif PartialState().local_process_index == 0:
torch.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@contextmanager
def _UpperCAmelCase ( **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for key, value in kwargs.items():
lowerCAmelCase : Optional[Any] = str(SCREAMING_SNAKE_CASE__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not hasattr(SCREAMING_SNAKE_CASE__ ,"""__qualname__""" ) and not hasattr(SCREAMING_SNAKE_CASE__ ,"""__name__""" ):
lowerCAmelCase : Dict = getattr(SCREAMING_SNAKE_CASE__ ,"""__class__""" ,SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ ,"""__qualname__""" ):
return obj.__qualname__
if hasattr(SCREAMING_SNAKE_CASE__ ,"""__name__""" ):
return obj.__name__
return str(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = destination.setdefault(SCREAMING_SNAKE_CASE__ ,{} )
merge_dicts(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Any = value
return destination
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if port is None:
lowerCAmelCase : Tuple = 2_9_5_0_0
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("""localhost""", port) ) == 0
| 693 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 | 1 |
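The dummy class above exists only to raise a helpful error when `keras_nlp` is missing. A self-contained stand-in for that `requires_backends` pattern might look like this (the helper name and message wording are assumptions, not the library's exact API):

import importlib.util

def requires_backends(obj, backends):
    # raise early if any required backend is not importable
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")

class KerasNLPOnlyTokenizer:
    _backends = ["keras_nlp"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)  # fails unless keras_nlp is installed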
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = emb.weight.shape
lowerCAmelCase : Any = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = emb.weight.data
return lin_layer
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Tuple = {}
for old_key in state_dict.keys():
lowerCAmelCase : Any = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCAmelCase : Any = key.replace("""moe_layer.experts.0""" ,F"""ffn.experts.expert_{expert_idx}""" )
else:
lowerCAmelCase : int = key.replace("""moe_layer.experts.""" ,"""ffn.experts.expert_""" )
if "gate" in key:
lowerCAmelCase : Tuple = key.replace(""".moe_layer.gate.wg""" ,""".ffn.router.classifier""" )
if "fc2" and "experts" not in key:
lowerCAmelCase : List[str] = key.replace(""".fc2.""" ,""".ffn.fc2.""" )
if "fc1" and "experts" not in key:
lowerCAmelCase : Tuple = key.replace(""".fc1.""" ,""".ffn.fc1.""" )
if ".encoder_attn." in key:
lowerCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" ,""".cross_attention.""" )
if "encoder_attn_layer_norm" in key:
lowerCAmelCase : Any = key.replace("""encoder_attn_layer_norm""" ,"""cross_attention_layer_norm""" )
if "final_layer_norm" in key:
lowerCAmelCase : List[str] = key.replace("""final_layer_norm""" ,"""ff_layer_norm""" )
lowerCAmelCase : List[Any] = state_dict[old_key]
return new_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = WEIGHTS_NAME ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Dict = 0
os.makedirs(SCREAMING_SNAKE_CASE__ ,exist_ok=SCREAMING_SNAKE_CASE__ )
for expert in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Dict = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : str = torch.load(SCREAMING_SNAKE_CASE__ )["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = rename_fairseq_keys(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ ,weights_name.replace(""".bin""" ,F"""-{len(SCREAMING_SNAKE_CASE__ )+1:05d}-of-???.bin""" ) )
torch.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(SCREAMING_SNAKE_CASE__ )[0]].dtype )
# Add the last block
lowerCAmelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ ,weights_name.replace(""".bin""" ,F"""-{len(SCREAMING_SNAKE_CASE__ )+1:05d}-of-???.bin""" ) )
lowerCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = rename_fairseq_keys(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCAmelCase : Any = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Otherwise, let's build the index
lowerCAmelCase : int = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : str = weights_name.replace(""".bin""" ,F"""-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE__ ):05d}.bin""" )
lowerCAmelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,weights_name.replace(""".bin""" ,F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(SCREAMING_SNAKE_CASE__ ,os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) )
for key in shard:
lowerCAmelCase : Union[str, Any] = shard_file
# Add the metadata
lowerCAmelCase : Dict = {"""total_size""": total_size}
lowerCAmelCase : Tuple = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ,"""w""" ,encoding="""utf-8""" ) as f:
lowerCAmelCase : Tuple = json.dumps(SCREAMING_SNAKE_CASE__ ,indent=2 ,sort_keys=SCREAMING_SNAKE_CASE__ ) + """\n"""
f.write(SCREAMING_SNAKE_CASE__ )
return metadata, index
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowerCAmelCase : Dict =parser.parse_args()
lowerCAmelCase , lowerCAmelCase : List[str] =shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowerCAmelCase : Any =NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase : int =NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 693 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 1 |
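The `__init__` above guards imports behind an optional-dependency check and falls back to dummy objects. A minimal sketch of that guard, with illustrative names for the exception and helper:

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def require(module_name: str) -> None:
    if importlib.util.find_spec(module_name) is None:
        raise OptionalDependencyNotAvailable(module_name)

try:
    require("torch")
    torch_available = True
except OptionalDependencyNotAvailable:
    torch_available = False   # callers would import dummy placeholders here

print("torch available:", torch_available)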
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : List[Any] ={
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] =['MobileViTFeatureExtractor']
lowerCAmelCase : int =['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str =[
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 | 1 |
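The Lucas-Lehmer snippet above decides whether the Mersenne number 2**p - 1 is prime. A short usage sketch over small prime exponents:

def lucas_lehmer(p: int) -> bool:
    # 2**p - 1 is prime iff s_{p-2} == 0, where s_0 = 4 and s_k = s_{k-1}**2 - 2 (mod 2**p - 1)
    if p == 2:
        return True
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

# 2**11 - 1 = 2047 = 23 * 89 is the first composite Mersenne number with prime exponent
print([p for p in (3, 5, 7, 11, 13, 17, 19) if lucas_lehmer(p)])  # [3, 5, 7, 13, 17, 19]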
import torch
from torch import nn
class _a ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1 , lowercase_=False ) -> Dict:
super().__init__()
lowerCAmelCase : Optional[int] = n_token
lowerCAmelCase : Optional[int] = d_embed
lowerCAmelCase : str = d_proj
lowerCAmelCase : str = cutoffs + [n_token]
lowerCAmelCase : Any = [0] + self.cutoffs
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : Any = self.cutoffs[0]
lowerCAmelCase : Optional[int] = len(self.cutoffs ) - 1
lowerCAmelCase : Optional[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase : str = nn.ModuleList()
lowerCAmelCase : Optional[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase_ , lowercase_ ) ) )
else:
self.out_projs.append(lowercase_ )
self.out_layers.append(nn.Linear(lowercase_ , lowercase_ ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase , lowerCAmelCase : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase_ , lowercase_ ) ) )
self.out_layers.append(nn.Linear(lowercase_ , r_idx - l_idx ) )
lowerCAmelCase : Any = keep_order
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
if proj is None:
lowerCAmelCase : List[str] = nn.functional.linear(lowercase_ , lowercase_ , bias=lowercase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase : Optional[int] = nn.functional.linear(lowercase_ , proj.t().contiguous() )
lowerCAmelCase : Tuple = nn.functional.linear(lowercase_ , lowercase_ , bias=lowercase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _snake_case ( self , lowercase_ , lowercase_=None , lowercase_=False ) -> Any:
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase : List[Any] = hidden[..., :-1, :].contiguous()
lowerCAmelCase : int = labels[..., 1:].contiguous()
lowerCAmelCase : str = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase : List[Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowerCAmelCase : str = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase : List[str] = self._compute_logit(lowercase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase : Dict = labels != -100
lowerCAmelCase : Tuple = torch.zeros_like(lowercase_ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase : Dict = (
-nn.functional.log_softmax(lowercase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase : Dict = nn.functional.log_softmax(lowercase_ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase , lowerCAmelCase : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase , lowerCAmelCase : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : int = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase : Tuple = self.out_layers[i].weight
lowerCAmelCase : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase_ )
biases.append(lowercase_ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase : Optional[Any] = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Tuple = nn.functional.log_softmax(lowercase_ , dim=1 )
if labels is None:
lowerCAmelCase : int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase : List[str] = torch.zeros_like(lowercase_ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase : int = 0
lowerCAmelCase : List[Any] = [0] + self.cutoffs
for i in range(len(lowercase_ ) - 1 ):
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase : List[str] = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase : List[str] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase : Dict = labels.index_select(0 , lowercase_ ) - l_idx
lowerCAmelCase : Union[str, Any] = head_logprob.index_select(0 , lowercase_ )
lowerCAmelCase : str = hidden.index_select(0 , lowercase_ )
else:
lowerCAmelCase : Optional[int] = hidden
if i == 0:
if labels is not None:
lowerCAmelCase : List[str] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase : Tuple = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Any = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Tuple = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase : str = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase : Dict = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowercase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _snake_case ( self , lowercase_ ) -> List[str]:
if self.n_clusters == 0:
lowerCAmelCase : Union[str, Any] = self._compute_logit(lowercase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowercase_ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase , lowerCAmelCase : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase , lowerCAmelCase : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase : str = self.out_layers[i].weight
lowerCAmelCase : List[Any] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase_ )
biases.append(lowercase_ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase : List[str] = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase : List[Any] = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Optional[int] = [0] + self.cutoffs
for i in range(len(lowercase_ ) - 1 ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase : Any = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : List[Any] = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase : int = logprob_i
return out
| 693 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 1 |
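At its core, the adaptive log-softmax module above scores each gold label by gathering its log-probability and negating it. A toy sketch of that single step (the cluster/shortlist routing is omitted; shapes are illustrative):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                 # (batch, vocab) — stand-in for head or tail logits
labels = torch.tensor([1, 3, 0, 7])
log_probs = F.log_softmax(logits, dim=-1)
nll = -log_probs.gather(1, labels[:, None]).squeeze(1)   # per-token negative log-likelihood
print(nll.shape)  # torch.Size([4])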
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _a ( snake_case_ ):
_UpperCamelCase: List[Any] = "unispeech-sat"
def __init__( self , lowercase_=32 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_="group" , lowercase_="gelu" , lowercase_=(512, 512, 512, 512, 512, 512, 512) , lowercase_=(5, 2, 2, 2, 2, 2, 2) , lowercase_=(10, 3, 3, 3, 3, 2, 2) , lowercase_=False , lowercase_=128 , lowercase_=16 , lowercase_=False , lowercase_=True , lowercase_=0.0_5 , lowercase_=10 , lowercase_=2 , lowercase_=0.0 , lowercase_=10 , lowercase_=0 , lowercase_=320 , lowercase_=2 , lowercase_=0.1 , lowercase_=100 , lowercase_=256 , lowercase_=256 , lowercase_=0.1 , lowercase_="mean" , lowercase_=False , lowercase_=False , lowercase_=256 , lowercase_=(512, 512, 512, 512, 1500) , lowercase_=(5, 3, 3, 1, 1) , lowercase_=(1, 2, 3, 1, 1) , lowercase_=512 , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=504 , **lowercase_ , ) -> Optional[int]:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Dict = feat_extract_norm
lowerCAmelCase : Tuple = feat_extract_activation
lowerCAmelCase : List[Any] = list(lowercase_ )
lowerCAmelCase : Any = list(lowercase_ )
lowerCAmelCase : int = list(lowercase_ )
lowerCAmelCase : Tuple = conv_bias
lowerCAmelCase : List[str] = num_conv_pos_embeddings
lowerCAmelCase : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase : Optional[int] = len(self.conv_dim )
lowerCAmelCase : Union[str, Any] = num_hidden_layers
lowerCAmelCase : int = intermediate_size
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : Dict = hidden_dropout
lowerCAmelCase : Any = attention_dropout
lowerCAmelCase : Optional[int] = activation_dropout
lowerCAmelCase : Any = feat_proj_dropout
lowerCAmelCase : Optional[Any] = final_dropout
lowerCAmelCase : Union[str, Any] = layerdrop
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Union[str, Any] = num_clusters
lowerCAmelCase : Optional[int] = do_stable_layer_norm
lowerCAmelCase : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase : List[str] = apply_spec_augment
lowerCAmelCase : List[Any] = mask_time_prob
lowerCAmelCase : Optional[Any] = mask_time_length
lowerCAmelCase : Optional[Any] = mask_time_min_masks
lowerCAmelCase : str = mask_feature_prob
lowerCAmelCase : int = mask_feature_length
lowerCAmelCase : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase : List[str] = num_codevectors_per_group
lowerCAmelCase : int = num_codevector_groups
lowerCAmelCase : Dict = contrastive_logits_temperature
lowerCAmelCase : int = feat_quantizer_dropout
lowerCAmelCase : Union[str, Any] = num_negatives
lowerCAmelCase : List[Any] = codevector_dim
lowerCAmelCase : Tuple = proj_codevector_dim
lowerCAmelCase : List[str] = diversity_loss_weight
# ctc loss
lowerCAmelCase : Optional[int] = ctc_loss_reduction
lowerCAmelCase : Any = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase : Union[str, Any] = list(lowercase_ )
lowerCAmelCase : Dict = list(lowercase_ )
lowerCAmelCase : Dict = list(lowercase_ )
lowerCAmelCase : Optional[Any] = xvector_output_dim
@property
def _snake_case ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 693 | 1 |
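The `_rope_scaling_validation` method above is easy to exercise standalone. A sketch restating the same checks as a free function:

def validate_rope_scaling(rope_scaling):
    # mirrors the checks in the config method above
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with fields `type` and `factor`, got {rope_scaling}")
    rope_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if rope_type not in ("linear", "dynamic"):
        raise ValueError(f"`type` must be one of ['linear', 'dynamic'], got {rope_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`factor` must be a float > 1, got {factor}")

validate_rope_scaling({"type": "dynamic", "factor": 2.0})   # passes silently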
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Any = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase : Dict = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
| 693 | 1 |
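One detail worth spelling out from the Swin config above: the channel width doubles once per stage, so the final hidden size follows directly from `embed_dim` and the number of stages. For the swin-tiny defaults:

embed_dim, depths = 96, [2, 2, 6, 2]                  # default values from the config above
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 96 * 2**3 = 768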
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = " " ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : str = 0
for index, char in enumerate(SCREAMING_SNAKE_CASE__ ):
if char == separator:
split_words.append(string[last_index:index] )
lowerCAmelCase : Dict = index + 1
elif index + 1 == len(SCREAMING_SNAKE_CASE__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 693 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 1 |
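The manual splitter above mostly mirrors `str.split`, but it silently drops an empty trailing segment when the string ends with the separator. A quick sketch showing the difference (the function name is illustrative):

def manual_split(string: str, separator: str = " ") -> list:
    parts, last = [], 0
    for i, ch in enumerate(string):
        if ch == separator:
            parts.append(string[last:i])
            last = i + 1
        elif i + 1 == len(string):
            parts.append(string[last:])
    return parts

print(manual_split("innovation in transformers"))        # ['innovation', 'in', 'transformers']
print(manual_split("a,b,", ","), "a,b,".split(","))      # ['a', 'b'] vs ['a', 'b', ''] — trailing separator differs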
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase : Any =logging.get_logger(__name__)
class _a ( snake_case_ ):
_UpperCamelCase: Optional[int] = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_ )
lowerCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
lowerCAmelCase : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCAmelCase : Any = get_size_dict(lowercase_ )
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : str = size
lowerCAmelCase : Dict = resample
lowerCAmelCase : List[Any] = do_center_crop
lowerCAmelCase : int = crop_size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Optional[Any] = rescale_factor
lowerCAmelCase : Dict = do_normalize
lowerCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
lowerCAmelCase : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase : int = get_resize_output_image_size(lowercase_ , size=size["""shortest_edge"""] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
lowerCAmelCase : Optional[int] = get_size_dict(lowercase_ )
return center_crop(lowercase_ , size=(size["""height"""], size["""width"""]) , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ ) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
lowerCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : str = size if size is not None else self.size
lowerCAmelCase : List[str] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : Optional[Any] = resample if resample is not None else self.resample
lowerCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Optional[Any] = get_size_dict(lowercase_ )
lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : int = image_std if image_std is not None else self.image_std
lowerCAmelCase : Tuple = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase : str = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
lowerCAmelCase : Dict = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase : Union[str, Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase : str = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase : List[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 693 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
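# Usage sketch (my addition, assuming the `_import_structure` mapping above): a
# lazy module keeps the top-level import cheap -- heavy framework code is only
# loaded when a symbol is first accessed, e.g.
#
#   from transformers.models.roformer import RoFormerConfig   # no torch needed
#   from transformers.models.roformer import RoFormerModel    # triggers the torch import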
| 693 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots ( a ,b ,c ):
    '''simple docstring'''
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""" )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main ( ):
    '''simple docstring'''
    solution_1 , solution_2 = quadratic_roots(a=5 ,b=6 ,c=1 )
    print(F"""The solutions are: {solution_1} and {solution_2}""" )
if __name__ == "__main__":
main()
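    # Extra check (my addition, not in the original script): a negative
    # discriminant yields complex roots via cmath.sqrt, returned unchanged.
    print(quadratic_roots(a=1 ,b=0 ,c=1 ) )  # roots of x^2 + 1 are +/- 1j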
| 693 |
def nor_gate ( input_1 ,input_2 ):
    '''simple docstring'''
    return int(input_1 == input_2 == 0 )
def main ( ):
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
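    # Quick self-check (my addition): NOR is true only when both inputs are 0.
    assert nor_gate(0 ,0 ) == 1
    assert nor_gate(0 ,1 ) == nor_gate(1 ,0 ) == nor_gate(1 ,1 ) == 0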
| 693 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost ( features ,target ,test_features ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 ,random_state=4_2 )
    xgb.fit(features ,target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) ,1 )
    return predictions
def main ( ):
    '''simple docstring'''
    housing = fetch_california_housing()
    data_input , data_output = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data_input ,data_output ,test_size=0.25 ,random_state=1 )
    predictions = xgboost(x_train ,y_train ,x_test )
    # Error printing
    print(F"""Mean Absolute Error : {mean_absolute_error(y_test ,predictions )}""" )
    print(F"""Mean Square Error : {mean_squared_error(y_test ,predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
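    # Interpretation aid (my addition, not part of the original script): the
    # California-housing target is the median house value in units of $100,000,
    # so an MAE near 0.3 means predictions are off by roughly $30,000 on average.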
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
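# Usage note (my addition): because the structure is gated on `is_vision_available`
# and `is_torch_available`, this package imports cleanly even on a torch-free
# install -- `PoolFormerConfig` is always importable, while `PoolFormerModel`
# only resolves when torch is present.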
| 693 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger =logging.get_logger(__name__)
lowerCAmelCase : int ={
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class PixaStructTextConfig ( PretrainedConfig ):
_UpperCamelCase: int = "pix2struct_text_model"
_UpperCamelCase: int = ["past_key_values"]
_UpperCamelCase: str = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=50244 , lowercase_=768 , lowercase_=64 , lowercase_=2048 , lowercase_=12 , lowercase_=12 , lowercase_=32 , lowercase_=128 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=1.0 , lowercase_="gelu_new" , lowercase_=0 , lowercase_=False , lowercase_=0 , lowercase_=1 , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Union[str, Any]:
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Union[str, Any] = d_kv
lowerCAmelCase : Tuple = d_ff
lowerCAmelCase : Dict = num_layers
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Union[str, Any] = relative_attention_num_buckets
lowerCAmelCase : Union[str, Any] = relative_attention_max_distance
lowerCAmelCase : Optional[int] = dropout_rate
lowerCAmelCase : Dict = layer_norm_epsilon
lowerCAmelCase : Tuple = initializer_factor
lowerCAmelCase : Dict = use_cache
lowerCAmelCase : List[Any] = eos_token_id
lowerCAmelCase : str = decoder_start_token_id
# for backwards compatibility
lowerCAmelCase : Any = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase , lowerCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
lowerCAmelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class PixaStructVisionConfig ( PretrainedConfig ):
_UpperCamelCase: str = "pix2struct_vision_model"
def __init__( self , lowercase_=768 , lowercase_=768 , lowercase_=2048 , lowercase_=64 , lowercase_=12 , lowercase_=12 , lowercase_="gelu_new" , lowercase_=1e-6 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=1e-10 , lowercase_=1.0 , lowercase_=4096 , lowercase_=32 , lowercase_=128 , **lowercase_ , ) -> Optional[int]:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Any = patch_embed_hidden_size
lowerCAmelCase : int = d_ff
lowerCAmelCase : str = dropout_rate
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Dict = initializer_factor
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Dict = dense_act_fn
lowerCAmelCase : int = seq_len
lowerCAmelCase : Tuple = relative_attention_num_buckets
lowerCAmelCase : Dict = relative_attention_max_distance
lowerCAmelCase : Any = d_kv
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase , lowerCAmelCase : Dict = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
lowerCAmelCase : Union[str, Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class PixaStructConfig ( PretrainedConfig ):
_UpperCamelCase: Tuple = "pix2struct"
_UpperCamelCase: Tuple = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=1.0 , lowercase_=0.0_2 , lowercase_=False , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Optional[Any]:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
lowerCAmelCase : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
lowerCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
lowerCAmelCase : Dict = PixaStructTextConfig(**lowercase_ )
lowerCAmelCase : List[Any] = PixaStructVisionConfig(**lowercase_ )
lowerCAmelCase : Tuple = self.text_config.decoder_start_token_id
lowerCAmelCase : Dict = self.text_config.pad_token_id
lowerCAmelCase : str = self.text_config.eos_token_id
lowerCAmelCase : Union[str, Any] = initializer_factor
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[Any] = self.initializer_range
lowerCAmelCase : Optional[int] = self.initializer_range
lowerCAmelCase : Optional[Any] = is_vqa
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ , **lowercase_ ) -> Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Dict = self.text_config.to_dict()
lowerCAmelCase : Tuple = self.vision_config.to_dict()
lowerCAmelCase : Union[str, Any] = self.__class__.model_type
return output
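# Usage sketch (my addition; the composing classmethod is named
# `from_text_vision_configs` upstream, obfuscated to `_snake_case` above):
#
#   text_cfg = PixaStructTextConfig(num_layers=2 )
#   vision_cfg = PixaStructVisionConfig(num_hidden_layers=2 )
#   cfg = PixaStructConfig.from_text_vision_configs(text_cfg ,vision_cfg )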
| 693 |
import os
import string
import sys
ARROW_KEY_FLAG =1 << 8
KEYMAP ={
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] =KEYMAP['up']
KEYMAP['arrow_end'] =KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER =[]
    WIN_KEYMAP ={
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] =ord(str(i))
def get_raw_chars ( ):
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd ,termios.TCSADRAIN ,old_settings )
    return ch
def get_character ( ):
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool ( PipelineTool ):
_UpperCamelCase: Tuple = "dandelin/vilt-b32-finetuned-vqa"
_UpperCamelCase: Optional[int] = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
_UpperCamelCase: str = "image_qa"
_UpperCamelCase: Dict = AutoProcessor
_UpperCamelCase: int = AutoModelForVisualQuestionAnswering
_UpperCamelCase: List[Any] = ["image", "text"]
_UpperCamelCase: Union[str, Any] = ["text"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image , question ):
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
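# Usage sketch (my addition; upstream this tool is registered under the name
# `image_qa`, and `encode`/`forward`/`decode` are chained by PipelineTool):
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("""cat.png""" ) ,"""What color is the cat?""" )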
| 693 |
# Imports
import numpy as np
class IndexCalculation :
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies ( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
        funcs = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
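# Usage sketch (my addition, assuming the obfuscated `_snake_case` index methods
# are restored to their upstream names such as `ndvi`):
#
#   import numpy as np
#   cs = IndexCalculation(red=np.array([[50.0]] ) ,nir=np.array([[200.0]] ) )
#   print(cs.ndvi() )  # (nir - red) / (nir + red) -> 0.6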
| 693 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig ( PretrainedConfig ):
_UpperCamelCase: Optional[Any] = "blenderbot-small"
_UpperCamelCase: str = ["past_key_values"]
_UpperCamelCase: Optional[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , lowercase_=50265 , lowercase_=512 , lowercase_=8 , lowercase_=2048 , lowercase_=16 , lowercase_=8 , lowercase_=2048 , lowercase_=16 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_=True , lowercase_="gelu" , lowercase_=512 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1 , lowercase_=False , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=2 , **lowercase_ , ) -> str:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : List[Any] = d_model
lowerCAmelCase : int = encoder_ffn_dim
lowerCAmelCase : Union[str, Any] = encoder_layers
lowerCAmelCase : List[Any] = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = decoder_attention_heads
lowerCAmelCase : int = dropout
lowerCAmelCase : Tuple = attention_dropout
lowerCAmelCase : List[Any] = activation_dropout
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : str = init_std
lowerCAmelCase : str = encoder_layerdrop
lowerCAmelCase : Tuple = decoder_layerdrop
lowerCAmelCase : str = use_cache
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
class BlenderbotSmallOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase : List[str] = {0: """batch"""}
lowerCAmelCase : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCAmelCase : Dict = {0: """batch""", 1: """decoder_sequence"""}
lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.num_layers
for i in range(lowercase_ ):
lowerCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCAmelCase : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : Optional[int] = super().outputs
else:
lowerCAmelCase : int = super(lowercase_ , self ).outputs
if self.use_past:
lowerCAmelCase , lowerCAmelCase : List[Any] = self.num_layers
for i in range(lowercase_ ):
lowerCAmelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
lowerCAmelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
lowerCAmelCase : str = seq_length if not self.use_past else 1
lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Any = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase : Optional[Any] = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase , lowerCAmelCase : List[Any] = common_inputs["""input_ids"""].shape
lowerCAmelCase : List[str] = common_inputs["""decoder_input_ids"""].shape[1]
lowerCAmelCase , lowerCAmelCase : List[str] = self.num_attention_heads
lowerCAmelCase : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase : int = decoder_seq_length + 3
lowerCAmelCase : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase : List[str] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
lowerCAmelCase : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.num_layers
lowerCAmelCase : Tuple = min(lowercase_ , lowercase_ )
lowerCAmelCase : Dict = max(lowercase_ , lowercase_ ) - min_num_layers
lowerCAmelCase : Any = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
lowerCAmelCase : Optional[int] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
lowerCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase , lowerCAmelCase : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase : List[Any] = seqlen + 2
lowerCAmelCase , lowerCAmelCase : List[str] = self.num_layers
lowerCAmelCase , lowerCAmelCase : str = self.num_attention_heads
lowerCAmelCase : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase : int = common_inputs["""attention_mask"""].dtype
lowerCAmelCase : List[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
lowerCAmelCase : str = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase : List[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase : Optional[Any] = tokenizer.num_special_tokens_to_add(lowercase_ )
lowerCAmelCase : List[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase : List[str] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase : Optional[Any] = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : Any = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
elif self.task == "causal-lm":
lowerCAmelCase : int = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : int = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
lowerCAmelCase : Dict = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
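# Usage sketch (my addition; the dummy-input generator keeps its upstream name
# `generate_dummy_inputs`, obfuscated to `_snake_case` above, and `tokenizer`
# stands in for any BlenderbotSmall tokenizer instance):
#
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config ,task="""seq2seq-lm""" )
#   dummy = onnx_config.generate_dummy_inputs(tokenizer ,batch_size=2 ,seq_length=8 )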
| 693 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links ( workflow_run_id ,token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url ,headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" ,headers=headers ).json()
            job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return job_links
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def get_artifacts_links ( workflow_run_id ,token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url ,headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" ,headers=headers ).json()
            artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        return artifacts
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def download_artifact ( artifact_name ,artifact_url ,output_dir ,token ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    result = requests.get(artifact_url ,headers=headers ,allow_redirects=False )
    download_url = result.headers["""Location"""]
    response = requests.get(download_url ,allow_redirects=True )
    file_path = os.path.join(output_dir ,F"""{artifact_name}.zip""" )
    with open(file_path ,"""wb""" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact ( artifact_zip_path ,job_links=None ):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("""UTF-8""" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(""": """ )]
                                    error = line[line.index(""": """ ) + len(""": """ ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
                                # `test` is the test method that failed
                                test = line[len("""FAILED """ ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` """
            F"""and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            """ problem.""" )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name ,None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors ,failed_tests )]
    return result
def get_all_errors ( artifact_dir ,job_links=None ):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir ,p ) for p in os.listdir(artifact_dir ) if p.endswith(""".zip""" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p ,job_links=job_links ) )
    return errors
def reduce_by_error ( logs ,error_filter=None ):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=True ) )
    return r
def get_model ( test ):
    '''simple docstring'''
    test = test.split("""::""" )[0]
    if test.startswith("""tests/models/""" ):
        test = test.split("""/""" )[2]
    else:
        test = None
    return test
def reduce_by_model ( logs ,error_filter=None ):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"""count""": n_errors, """errors""": error_counts}
    r = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=True ) )
    return r
def make_github_table ( reduced_by_error ):
    '''simple docstring'''
    header = """| no. | error | status |"""
    sep = """|-:|:-|:-|"""
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["""count"""]
        line = F"""| {count} | {error[:1_0_0]} | |"""
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model ( reduced_by_model ):
    '''simple docstring'''
    header = """| model | no. of errors | major error | count |"""
    sep = """|-:|-:|-:|-:|"""
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["""count"""]
        error , _count = list(reduced_by_model[model]["""errors"""].items() )[0]
        line = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args =parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links =get_job_links(args.workflow_run_id, token=args.token)
    job_links ={}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index =k.find(' / ')
                k =k[index + len(' / ') :]
            job_links[k] =v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts =get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors =get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter =Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common =counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error =reduce_by_error(errors)
    reduced_by_model =reduce_by_model(errors)
    sa =make_github_table(reduced_by_error)
    sb =make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sb)
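    # Example invocation (my addition; the script file name is an assumption):
    #   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
    #       --output_dir ./ci_errors --token "$GITHUB_TOKEN"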
| 693 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _UpperCAmelCase ( ):
'''simple docstring'''
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest ( nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest ( unittest.TestCase ):
def _snake_case ( self ) -> Optional[int]:
        batch_sizes = []
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase_ , [128, 64, 32, 16, 8] )
def _snake_case ( self ) -> List[Any]:
        batch_sizes = []
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""" )
self.assertListEqual(lowercase_ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _snake_case ( self ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase_ ):
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _snake_case ( self ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _snake_case ( self ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga , argb ):
            if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _snake_case ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _snake_case ( self ) -> str:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
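# Usage sketch (my addition): outside of tests, the decorator wraps a real
# training loop and halves the batch size after each CUDA OOM until it fits.
#
#   @find_executable_batch_size(starting_batch_size=64 )
#   def training_function(batch_size ):
#       ...  # build dataloaders with `batch_size` and run the training loop
#   training_function()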
| 693 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase : int =[
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
lowerCAmelCase : Dict =[
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
lowerCAmelCase : str =(
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
lowerCAmelCase : Dict =(
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
lowerCAmelCase : Optional[int] =[
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key ( k ,patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name ,hf_name )
    return k
def convert_bigbird_pegasus ( tf_weights ,config_update ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
    for k, v in tqdm(decoder_weights.items() ,"""tf -> hf conversion""" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() ,"""tf -> hf conversion""" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping["""model.encoder.embed_positions.weight"""] = mapping["""model.embed_positions.weight"""]
    mapping["""model.decoder.embed_positions.weight"""] = mapping.pop("""model.embed_positions.weight""" )
    missing , extra = torch_model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            """final_logits_bias""",
            """model.encoder.embed_tokens.weight""",
            """model.decoder.embed_tokens.weight""",
            """lm_head.weight""",
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy ( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""global_step"""]
    for name, shape in tqdm(init_vars ,desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch ( ckpt_path ,save_dir ,config_update ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args =parser.parse_args()
    config_update ={}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
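    # Example invocation (my addition; the script file name is an assumption):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird-pegasus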
| 693 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig ( PretrainedConfig ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
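# Usage sketch for the configuration above, assuming it mirrors the public
# `transformers.DetrConfig` API (class names are obfuscated to `_a` in this dump):
from transformers import DetrConfig
detr_config = DetrConfig(num_queries=50 , d_model=128 ) # override two defaults
assert detr_config.hidden_size == 128 # attribute_map aliases hidden_size -> d_model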
| 693 | 1 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 1 |
from math import factorial
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) or not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
lowerCAmelCase : List[Any] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
lowerCAmelCase : int = float(factorial(SCREAMING_SNAKE_CASE__ ) )
coefficient /= factorial(SCREAMING_SNAKE_CASE__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.7_5))
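# Expected output: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375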
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase : Optional[Any] =2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowerCAmelCase : List[Any] ={
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
lowerCAmelCase : List[Any] ={}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCAmelCase : Any ='facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCAmelCase : Dict ='allenai'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = dict((re.sub(r"""@@$""" ,"""""" ,SCREAMING_SNAKE_CASE__ ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" ,"""</w>""" ,SCREAMING_SNAKE_CASE__ ), v) for k, v in d.items() )
lowerCAmelCase : List[Any] = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
lowerCAmelCase : Union[str, Any] = d[k] # restore
return da
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ ,exist_ok=SCREAMING_SNAKE_CASE__ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
lowerCAmelCase : Optional[int] = basename(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = dirname(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCAmelCase : List[Any] = cls.hub_models()
lowerCAmelCase : Union[str, Any] = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowerCAmelCase : Tuple = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
lowerCAmelCase : Dict = hub_utils.from_pretrained(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,archive_map=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = vars(chkpt["""args"""]["""model"""] )
lowerCAmelCase : Dict = args["""source_lang"""]
lowerCAmelCase : List[Any] = args["""target_lang"""]
lowerCAmelCase : Tuple = dirname(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = basename(SCREAMING_SNAKE_CASE__ )
# dicts
lowerCAmelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""dict.{src_lang}.txt""" )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""dict.{tgt_lang}.txt""" )
lowerCAmelCase : Dict = Dictionary.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,"""vocab-src.json""" )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCAmelCase : Union[str, Any] = True
for k in src_vocab.keys():
if not k.islower():
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Union[str, Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = rewrite_dict_keys(tgt_dict.indices )
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,"""vocab-tgt.json""" )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# merges_file (bpecodes)
lowerCAmelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ ,VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCAmelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
break
with open(SCREAMING_SNAKE_CASE__ ,encoding="""utf-8""" ) as fin:
lowerCAmelCase : Optional[Any] = fin.read()
lowerCAmelCase : Any = re.sub(r""" \d+$""" ,"""""" ,SCREAMING_SNAKE_CASE__ ,0 ,re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as fout:
fout.write(SCREAMING_SNAKE_CASE__ )
# model config
lowerCAmelCase : Any = os.path.join(SCREAMING_SNAKE_CASE__ ,"""config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args['tokenizer']}"""
lowerCAmelCase : Optional[int] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowerCAmelCase : Optional[Any] = 5
lowerCAmelCase : Union[str, Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCAmelCase : Dict = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowerCAmelCase : Tuple = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# tokenizer config
lowerCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1_0_2_4,
"""do_lower_case""": do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# model
lowerCAmelCase : List[Any] = chkpt["""models"""][0]
lowerCAmelCase : Union[str, Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCAmelCase : Optional[int] = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCAmelCase : Dict = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE__ ,strict=SCREAMING_SNAKE_CASE__ )
# save
lowerCAmelCase : str = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase : Dict =parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
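# Example invocation (a sketch; the dump directory is hypothetical and must contain the
# fairseq checkpoint together with its dict.*.txt and bpecodes files):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /path/to/wmt19.en-de/model4.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-en-de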
| 693 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
| 693 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): # noqa: E741
'''simple docstring'''
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = [0] * n
lowerCAmelCase : Union[str, Any] = [False] * n
lowerCAmelCase : Union[str, Any] = [False] * n
def dfs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
if parent == root:
out_edge_count += 1
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : str = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowerCAmelCase : str = dfs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
lowerCAmelCase : str = True
# AP found via cycle
if at == low[to]:
lowerCAmelCase : Tuple = True
else:
lowerCAmelCase : Optional[int] = min(low[at] ,SCREAMING_SNAKE_CASE__ )
return out_edge_count
for i in range(SCREAMING_SNAKE_CASE__ ):
if not visited[i]:
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : List[str] = dfs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,-1 ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = out_edge_count > 1
for x in range(len(SCREAMING_SNAKE_CASE__ ) ):
if is_art[x] is True:
print(SCREAMING_SNAKE_CASE__ )
# Adjacency list of graph
lowerCAmelCase : Tuple ={
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
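# Expected output for the graph above: vertices 2, 3 and 5 are articulation points
# (removing 2 cuts off {0, 1}, removing 3 isolates 4, removing 5 cuts off {6, 7, 8}).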
| 693 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
| 693 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] =False, False, False
@dataclass
class _a :
_UpperCamelCase: Optional[int] = None
_UpperCamelCase: bool = True
_UpperCamelCase: bool = True
_UpperCamelCase: Optional[str] = None
# Automatically constructed
_UpperCamelCase: ClassVar[str] = "dict"
_UpperCamelCase: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_UpperCamelCase: str = field(default="Audio" , init=snake_case_ , repr=snake_case_ )
def __call__( self ) -> List[str]:
return self.pa_type
def _snake_case ( self , lowercase_ ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(lowercase_ , lowercase_ ):
return {"bytes": None, "path": value}
elif isinstance(lowercase_ , lowercase_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCAmelCase : Dict = BytesIO()
sf.write(lowercase_ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCAmelCase : List[str] = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
lowerCAmelCase : Dict = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32767
lowerCAmelCase : List[str] = BytesIO(bytes() )
sf.write(lowercase_ , lowercase_ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCAmelCase , lowerCAmelCase : Tuple = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCAmelCase : List[str] = xsplitext(lowercase_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCAmelCase : Optional[Any] = token_per_repo_id or {}
lowerCAmelCase : Dict = path.split("""::""" )[-1]
try:
lowerCAmelCase : List[Any] = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCAmelCase : int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCAmelCase : Dict = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = sf.read(lowercase_ )
else:
lowerCAmelCase , lowerCAmelCase : List[str] = sf.read(lowercase_ )
lowerCAmelCase : List[Any] = array.T
if self.mono:
lowerCAmelCase : Tuple = librosa.to_mono(lowercase_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCAmelCase : int = librosa.resample(lowercase_ , orig_sr=lowercase_ , target_sr=self.sampling_rate )
lowerCAmelCase : List[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _snake_case ( self , lowercase_ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowerCAmelCase : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowerCAmelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : int = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCAmelCase : Dict = pa.array([Audio().encode_example(lowercase_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCAmelCase : Any = storage.field("""bytes""" )
else:
lowerCAmelCase : Optional[int] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCAmelCase : List[Any] = storage.field("""path""" )
else:
lowerCAmelCase : Optional[int] = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowercase_ , self.pa_type )
def _snake_case ( self , lowercase_ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase_ ):
with xopen(lowercase_ , """rb""" ) as f:
lowerCAmelCase : Dict = f.read()
return bytes_
lowerCAmelCase : Optional[Any] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase : Any = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
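# Usage sketch, assuming the feature above mirrors the public `datasets.Audio` API
# (the file path below is hypothetical):
from datasets import Audio, Dataset
ds = Dataset.from_dict({"""audio""": ["""/path/to/clip.wav"""]} )
ds = ds.cast_column("""audio""" , Audio(sampling_rate=16000 ) ) # decoding happens lazily on access
# ds[0]["audio"] -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}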
| 693 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
if subparsers is not None:
lowerCAmelCase : List[Any] = subparsers.add_parser("""test""" )
else:
lowerCAmelCase : str = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" ,default=SCREAMING_SNAKE_CASE__ ,help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) ,)
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
lowerCAmelCase : List[str] = script_name
else:
lowerCAmelCase : int = F"""--config_file={args.config_file} {script_name}"""
lowerCAmelCase : Tuple = ["""accelerate-launch"""] + test_args.split()
lowerCAmelCase : Dict = execute_subprocess_async(SCREAMING_SNAKE_CASE__ ,env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = test_command_parser()
lowerCAmelCase : str = parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
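# Example invocation (sketch): run the bundled sanity-check script against the current
# accelerate setup, or against an explicit config file:
#   accelerate test
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml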
| 693 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
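# Quick sanity check for the shift-and-add multiplication helpers above. Both are
# obfuscated to `_UpperCAmelCase` in this dump; in the original source they are
# `binary_multiply(a, b)` and `binary_mod_multiply(a, b, modulus)` (names assumed):
#   binary_multiply(2, 3)        -> 6
#   binary_mod_multiply(2, 3, 5) -> 1   # (2 * 3) % 5 == 1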
| 693 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
@staticmethod
def _snake_case ( *lowercase_ , **lowercase_ ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_torch
class _a ( unittest.TestCase ):
_UpperCamelCase: Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> str:
lowerCAmelCase : Dict = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
lowerCAmelCase : Any = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def _snake_case ( self , lowercase_ , lowercase_ ) -> Dict:
lowerCAmelCase : Dict = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase : Any = len(lowercase_ )
self.assertGreater(lowercase_ , 0 )
self.assertEqual(
lowercase_ , [
{
"""score""": ANY(lowercase_ ),
"""label""": ANY(lowercase_ ),
"""box""": {"""xmin""": ANY(lowercase_ ), """ymin""": ANY(lowercase_ ), """xmax""": ANY(lowercase_ ), """ymax""": ANY(lowercase_ )},
}
for i in range(lowercase_ )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _snake_case ( self ) -> List[str]:
pass
@require_torch
def _snake_case ( self ) -> Any:
lowerCAmelCase : int = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
lowerCAmelCase : Optional[Any] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
lowerCAmelCase : Tuple = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" )
lowerCAmelCase : Union[str, Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
lowerCAmelCase : Union[str, Any] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _snake_case ( self ) -> Any:
pass
@require_torch
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : List[str] = 0.2
lowerCAmelCase : str = pipeline("""zero-shot-object-detection""" )
lowerCAmelCase : Any = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowercase_ , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" )
lowerCAmelCase : Optional[int] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowercase_ , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 693 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self) -> str:
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError
    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    '''simple docstring'''
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
def f(y):
    '''simple docstring'''
    return y**2 * y**4
print(differentiate(f, 9, 2))
| 693 | 1 |
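The snippet above implements forward-mode automatic differentiation with dual numbers. A minimal first-order sketch of the same idea (one value plus one derivative slot) already recovers first derivatives; the names below (SimpleDual, derivative) are illustrative, not taken from the snippet:

from math import isclose
# First-order dual number: value + deriv * eps, where eps**2 == 0.
class SimpleDual:
    def __init__(self, value, deriv):
        self.value = value
        self.deriv = deriv
    def __add__(self, other):
        return SimpleDual(self.value + other.value, self.deriv + other.deriv)
    def __mul__(self, other):
        # Product rule: (a + a'e)(b + b'e) = ab + (a b' + a' b)e
        return SimpleDual(
            self.value * other.value,
            self.value * other.deriv + self.deriv * other.value,
        )
def derivative(func, x):
    # Seeding the dual part with 1.0 propagates d/dx through func.
    return func(SimpleDual(x, 1.0)).deriv
assert isclose(derivative(lambda d: d * d, 3.0), 6.0)  # d/dx x**2 at 3 is 6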
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Union[str, Any] = LayoutLMTokenizer
_UpperCamelCase: Optional[Any] = LayoutLMTokenizerFast
_UpperCamelCase: Any = True
_UpperCamelCase: List[Any] = True
def _snake_case ( self ) -> Union[str, Any]:
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _snake_case ( self , **lowercase_ ) -> Tuple:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def _snake_case ( self , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : str = """UNwant\u00E9d,running"""
lowerCAmelCase : str = """unwanted, running"""
return input_text, output_text
def _snake_case ( self ) -> Any:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [7, 4, 5, 10, 8, 9] )
def _snake_case ( self ) -> List[Any]:
pass
| 693 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 | 1 |
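The keras_nlp stub above uses the dummy-object pattern: the import always succeeds, and the missing backend is only reported when the placeholder is actually used. A self-contained sketch of that idea, with made-up names rather than the transformers internals:

# A metaclass that turns instantiation into a loud, actionable error.
class RequiresBackend(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(
            f"{cls.__name__} requires the '{cls.backend}' package to be installed."
        )
class KerasNLPModelStub(metaclass=RequiresBackend):
    backend = "keras_nlp"
try:
    KerasNLPModelStub()  # fails here, not at import time
except ImportError as err:
    print(err)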
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
lowerCAmelCase : Tuple =parser.parse_args()
lowerCAmelCase : Optional[Any] =download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 693 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 1 |
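Both halves of the row above gate imports on optional dependencies. The general pattern is a try/except around the import plus a fallback path; hypothetical_extra below is an assumed, nonexistent module used only to illustrate:

try:
    import hypothetical_extra  # assumed optional dependency, not a real package
    _HAS_EXTRA = True
except ImportError:
    _HAS_EXTRA = False
def run():
    # Degrade gracefully instead of failing at import time.
    if not _HAS_EXTRA:
        return "optional dependency unavailable, using fallback"
    return hypothetical_extra.do_work()
print(run())  # prints the fallback message when the module is absent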
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 |
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 | 1 |
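A quick worked check of the resistor formulas above: resistors of 2, 4 and 4 ohms give 2 + 4 + 4 = 10 ohms in series and 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm in parallel.

def series(resistors):
    return sum(resistors)
def parallel(resistors):
    return 1 / sum(1 / r for r in resistors)
print(series([2, 4, 4]))    # 10
print(parallel([2, 4, 4]))  # 1.0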
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase : str =logging.get_logger(__name__)
class _a ( snake_case_ ):
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 693 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 1 |
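The ImageGPT wrapper above is a deprecation shim: warn, then delegate everything to the replacement class. A generic sketch of the pattern with invented class names (FutureWarning is one common category choice, assumed here):

import warnings
class NewImageProcessor:
    def __init__(self, size=32):
        self.size = size
class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        # Warn on construction, then behave exactly like the new class.
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
OldFeatureExtractor()  # emits a FutureWarning, then works as NewImageProcessor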
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( snake_case_ ):
_UpperCamelCase: Any = (IPNDMScheduler,)
_UpperCamelCase: int = (("num_inference_steps", 50),)
def _snake_case ( self , **lowercase_ ) -> List[Any]:
lowerCAmelCase : List[str] = {"""num_train_timesteps""": 1000}
config.update(**lowercase_ )
return config
def _snake_case ( self , lowercase_=0 , **lowercase_ ) -> Any:
lowerCAmelCase : Optional[int] = dict(self.forward_default_kwargs )
lowerCAmelCase : Tuple = kwargs.pop("""num_inference_steps""" , lowercase_ )
lowerCAmelCase : List[str] = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
lowerCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : List[Any] = self.get_scheduler_config(**lowercase_ )
lowerCAmelCase : Union[str, Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
lowerCAmelCase : Tuple = dummy_past_residuals[:]
lowerCAmelCase : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase : List[Any] = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase : int = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self , lowercase_=0 , **lowercase_ ) -> str:
lowerCAmelCase : List[str] = dict(self.forward_default_kwargs )
lowerCAmelCase : Union[str, Any] = kwargs.pop("""num_inference_steps""" , lowercase_ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : Optional[Any] = 0.1 * sample
lowerCAmelCase : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Any = self.get_scheduler_config()
lowerCAmelCase : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : int = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
lowerCAmelCase : Optional[Any] = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : int = dummy_past_residuals[:]
lowerCAmelCase : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase : Dict = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase : int = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self , **lowercase_ ) -> Optional[int]:
lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(**lowercase_ )
lowerCAmelCase : Optional[Any] = scheduler_class(**lowercase_ )
lowerCAmelCase : Optional[int] = 10
lowerCAmelCase : Optional[int] = self.dummy_model()
lowerCAmelCase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : List[str] = model(lowercase_ , lowercase_ )
lowerCAmelCase : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Optional[Any] = model(lowercase_ , lowercase_ )
lowerCAmelCase : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def _snake_case ( self ) -> int:
lowerCAmelCase : int = dict(self.forward_default_kwargs )
lowerCAmelCase : Any = kwargs.pop("""num_inference_steps""" , lowercase_ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
lowerCAmelCase : Optional[int] = scheduler_class(**lowercase_ )
lowerCAmelCase : Optional[Any] = self.dummy_sample
lowerCAmelCase : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , """set_timesteps""" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , """set_timesteps""" ):
lowerCAmelCase : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
lowerCAmelCase : Dict = dummy_past_residuals[:]
lowerCAmelCase : Union[str, Any] = scheduler.timesteps[5]
lowerCAmelCase : List[Any] = scheduler.timesteps[6]
lowerCAmelCase : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase : Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self ) -> Optional[int]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ , time_step=lowercase_ )
def _snake_case ( self ) -> List[Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ , time_step=lowercase_ )
def _snake_case ( self ) -> Any:
lowerCAmelCase : List[Any] = self.full_loop()
lowerCAmelCase : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 693 | 1 |
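The rope_scaling validation in the LLaMA config above reduces to a standalone rule: a two-field dict with a recognised type and a float factor greater than 1. A sketch restating it (the function name is illustrative):

def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with two fields, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling` type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling` factor must be a float > 1, got {factor}")
validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently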
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Any = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase : Dict = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
| 693 | 1 |
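The Swin config above derives the final hidden size by doubling the embedding dimension once per downsampling stage; for the tiny variant the arithmetic is:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768: 96 doubled across the three stage transitions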
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 1 |
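A pin table like the one above is typically consumed by filtering it into per-extra requirement lists; a minimal sketch of that lookup (deps_list is an illustrative helper name, not necessarily the real one):

deps = {
    "numpy": "numpy>=1.17",
    "packaging": "packaging>=20.0",
    "torch": "torch>=1.9,!=1.12.0",
}
def deps_list(*pkgs):
    # Resolve package names to their pinned requirement strings.
    return [deps[p] for p in pkgs]
print(deps_list("numpy", "torch"))  # ['numpy>=1.17', 'torch>=1.9,!=1.12.0']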
def solution(n: int = 4_0_0_0_0_0_0) -> int:
    '''simple docstring'''
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 693 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 1 |
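The RoFormer module above defers its heavy imports through _LazyModule. A toy stand-in for that idea, resolving the real module only on first attribute access (a sketch, not the transformers implementation):

import importlib
class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None
    def __getattr__(self, attr):
        # Import lazily on first use, then cache the resolved module.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)
json_lazy = LazyModule("json")
print(json_lazy.dumps({"lazy": True}))  # the json import happens on this line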
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : Any ={
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _a ( snake_case_ ):
_UpperCamelCase: Union[str, Any] = "dpt"
def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1e-12 , lowercase_=384 , lowercase_=16 , lowercase_=3 , lowercase_=False , lowercase_=True , lowercase_=[2, 5, 8, 11] , lowercase_="project" , lowercase_=[4, 2, 1, 0.5] , lowercase_=[96, 192, 384, 768] , lowercase_=256 , lowercase_=-1 , lowercase_=False , lowercase_=True , lowercase_=0.4 , lowercase_=255 , lowercase_=0.1 , lowercase_=[1, 1024, 24, 24] , lowercase_=[0, 1] , lowercase_=None , **lowercase_ , ) -> List[Any]:
super().__init__(**lowercase_ )
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCAmelCase : Tuple = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
lowerCAmelCase : Union[str, Any] = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCAmelCase : List[str] = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
lowerCAmelCase : Dict = backbone_featmap_shape
lowerCAmelCase : Optional[int] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Any = None
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Union[str, Any] = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Any = num_channels
lowerCAmelCase : int = qkv_bias
lowerCAmelCase : Tuple = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
lowerCAmelCase : Any = readout_type
lowerCAmelCase : Optional[int] = reassemble_factors
lowerCAmelCase : List[Any] = neck_hidden_sizes
lowerCAmelCase : Optional[int] = fusion_hidden_size
lowerCAmelCase : int = head_in_index
lowerCAmelCase : List[str] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase : Tuple = use_auxiliary_head
lowerCAmelCase : str = auxiliary_loss_weight
lowerCAmelCase : Tuple = semantic_loss_ignore_index
lowerCAmelCase : Tuple = semantic_classifier_dropout
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : Optional[Any] = self.backbone_config.to_dict()
lowerCAmelCase : Tuple = self.__class__.model_type
return output
| 693 |
def nor_gate(input_a: int, input_b: int) -> int:
    '''simple docstring'''
    return int(input_a == input_b == 0)
def main() -> None:
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 1 |
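Since NOR is functionally complete, the gate above suffices to rebuild NOT, OR and AND; a short demonstration:

def nor(a, b):
    return int(a == b == 0)
def not_(a):
    return nor(a, a)
def or_(a, b):
    return not_(nor(a, b))
def and_(a, b):
    return nor(not_(a), not_(b))
print([and_(a, b) for a in (0, 1) for b in (0, 1)])  # [0, 0, 0, 1]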
import unittest
from knapsack import knapsack as k
class _a ( unittest.TestCase ):
    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : int ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =['PoolFormerFeatureExtractor']
lowerCAmelCase : List[str] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 1 |
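The knapsack tests above imply a knapsack(capacity, weights, values, counter) signature; a recursive 0/1 implementation consistent with those expected results (a sketch, not necessarily the module under test):

def knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        # Item doesn't fit: skip it.
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of skipping or including the item.
    return max(
        knapsack(capacity, weights, values, counter - 1),
        values[counter - 1]
        + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
    )
print(knapsack(50, [10, 20, 30], [60, 100, 120], 3))  # 220, matching the last test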