from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # Returns a dict of all attention processors used in the model, indexed by their weight name.
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
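
# A minimal smoke-test sketch for the prior above. The tiny config values are
# illustrative assumptions, not the checkpoint defaults.
#
# prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8, num_layers=2, embedding_dim=16)
# latents = torch.randn(1, 16)                    # noisy image embedding being denoised
# proj_embedding = torch.randn(1, 16)             # projected CLIP text embedding
# encoder_hidden_states = torch.randn(1, 77, 16)  # per-token CLIP text states
# out = prior(latents, timestep=10, proj_embedding=proj_embedding, encoder_hidden_states=encoder_hidden_states)
# assert out.predicted_image_embedding.shape == (1, 16)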
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        # Set the element at index i to val and recompute every affected node.
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        # Apply `function` over the inclusive index range [i, j].
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        # Breadth-first traversal over all nodes of the tree.
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
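
# Property check (illustrative): every range query should agree with reducing
# the raw slice with the same function.
if __name__ == "__main__":
    import operator
    import random
    from functools import reduce

    data = [random.randint(0, 100) for _ in range(16)]
    tree = SegmentTree(data, operator.add)
    for i in range(len(data)):
        for j in range(i, len(data)):
            assert tree.query_range(i, j) == reduce(operator.add, data[i : j + 1])
    print("segment tree matches naive reduction")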
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
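
# Sanity check (illustrative): adding the integer part back should recover the
# original number up to rounding.
if __name__ == "__main__":
    n = -14.789
    assert abs(int(n) + decimal_isolate(n, 3) - n) < 1e-9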
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
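
# Example invocation (illustrative script name, prompt, and paths; requires the
# `clip-retrieval` package and network access to the LAION knn service):
#
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_data --num_class_images 200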
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many rolls of `dice_number` dice with `sides_number` sides produce each possible total."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability that Peter's nine 4-sided dice beat Colin's six 6-sided dice."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)
if __name__ == "__main__":
print(F'''{solution() = }''')
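
# Monte Carlo cross-check (illustrative): the sampled win rate should land close
# to the exact answer computed above.
if __name__ == "__main__":
    import random

    wins = 0
    trials = 100_000
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    print(f"sampled win rate: {wins / trials:.4f} (exact: {solution()})")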
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
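
# Example invocation (illustrative script name and output path):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations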
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    # Toy "tool" exposed to the interpreter in the tests below.
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
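
# Direct usage sketch (outside the test harness): `evaluate` interprets a small
# Python subset against an explicit tool dict and a mutable state dict.
#
# state = {}
# evaluate("y = add_two(4)\nz = y * 2", {"add_two": add_two}, state=state)
# # state is now {"y": 6, "z": 12}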
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name: str) -> str:
    # Map original MAE checkpoint names onto the transformers ViTMAE layout.
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Fused qkv projections are split into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
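
# Example invocation (illustrative output directory):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base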
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
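
# Example invocation (illustrative paths; the ref file for Chinese whole-word
# masking is typically produced by a companion preprocessing script):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file ./train.txt \
#       --train_ref_file ./ref.txt \
#       --do_train \
#       --output_dir ./output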
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a `key` function to lower-case its result and strip underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
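
# Illustrative before/after for the single-line import case handled above:
#
#   sort_objects_in_import('_import_structure["models"].extend(["ModelB", "CONSTANT_A", "ModelA"])')
#   # -> '_import_structure["models"].extend(["CONSTANT_A", "ModelA", "ModelB"])'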
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime number less than or equal to `num`."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
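
# Sanity check (illustrative): the sieve should agree with the known primes up to 30.
if __name__ == "__main__":
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]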
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
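
# How the lazy pattern behaves (illustrative): importing the package stays cheap
# because `_LazyModule` only resolves a submodule when one of its attributes is
# actually accessed.
#
# from transformers.models import maskformer   # fast: nothing heavy imported yet
# model_cls = maskformer.MaskFormerModel       # now modeling_maskformer is imported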
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass, so assign the new schema through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
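
# Usage sketch (illustrative label names): aligning the template against a
# dataset's features swaps the generic ClassLabel for the dataset's own.
#
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# template = ImageClassification().align_with_features(features)
# assert template.label_schema["labels"].names == ["cat", "dog"]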
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 5 -> "0b101"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
import doctest
doctest.testmod()
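
# Quick check against Python's built-in formatting (illustrative):
if __name__ == "__main__":
    for n in (0, 5, -37, 255):
        assert decimal_to_binary(n) == bin(n)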
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is divisible by `divisor`."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 129: find the least odd divisor coprime to 10 whose repunit index first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"{solution() = }")
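
# Brute-force cross-check at a small limit (illustrative): scan every candidate
# instead of stepping by 2 from limit - 1; both should find the same answer.
if __name__ == "__main__":
    small = next(n for n in range(10, 10_000) if least_divisible_repunit(n) > 100)
    assert small == solution(limit=100)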
from __future__ import annotations
class lowercase__ :
    def __init__( self : int , rows : list[list[int]] ):
        """simple docstring"""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float." )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self : Dict ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
    def num_rows( self : str ):
"""simple docstring"""
return len(self.rows )
@property
    def num_columns( self : Optional[Any] ):
"""simple docstring"""
return len(self.rows[0] )
@property
    def order( self : Optional[Any] ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
    def is_square( self : Tuple ):
"""simple docstring"""
return self.order[0] == self.order[1]
    def identity( self : Optional[Any] ):
        """simple docstring"""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self : Tuple ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
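            # Laplace expansion along the first row for matrices larger than 2x2.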
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self : Any ):
"""simple docstring"""
return bool(self.determinant() )
    def get_minor( self : Optional[Any] , row : int , column : int ):
        """simple docstring"""
        values = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
        return Matrix(values ).determinant()
    def get_cofactor( self : str , row : int , column : int ):
        """simple docstring"""
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self : Optional[Any] ):
"""simple docstring"""
return Matrix(
[
                [self.get_minor(row , column ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
    def cofactors( self : List[str] ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self : str ):
        """simple docstring"""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self : List[str] ):
        """simple docstring"""
        determinant = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__( self : Dict ):
"""simple docstring"""
return str(self.rows )
def __str__( self : Tuple ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(_lowercase ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row( self : Any , row : list[int] , position : int | None = None ):
        """simple docstring"""
        type_error = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(row , list ):
raise type_error
for value in row:
            if not isinstance(value , (int, float) ):
raise type_error
        if len(row ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
            self.rows.append(row )
else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self : Optional[Any] , column : list[int] , position : int | None = None ):
        """simple docstring"""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(column , list ):
raise type_error
for value in column:
            if not isinstance(value , (int, float) ):
raise type_error
        if len(column ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
            self.rows = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
    def __eq__( self : Dict , other : object ):
        """simple docstring"""
        if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
    def __ne__( self : List[Any] , other : object ):
"""simple docstring"""
return not self == other
def __neg__( self : int ):
"""simple docstring"""
return self * -1
    def __add__( self : List[Any] , other : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self : Optional[int] , other : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self : int , other : Matrix | int | float ):
        """simple docstring"""
        if isinstance(other , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__( self : Optional[Any] , other : int ):
        """simple docstring"""
        if not isinstance(other , int ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls : List[Any] , row : list[int] , column : list[int] ):
"""simple docstring"""
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
"""simple docstring"""
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_28 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        """simple docstring"""
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': NezhaModel,
            '''fill-mask''': NezhaForMaskedLM,
            '''question-answering''': NezhaForQuestionAnswering,
            '''text-classification''': NezhaForSequenceClassification,
            '''token-classification''': NezhaForTokenClassification,
            '''zero-shot''': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        """simple docstring"""
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_masked_lm(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , """bert.pt""" ) )
                loaded = torch.jit.load(os.path.join(tmp , """bert.pt""" ) , map_location=torch_device )
                loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_nezha_model(self ):
        """simple docstring"""
        model = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_nezha_masked_lm(self ):
        """simple docstring"""
        model = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 2_11_28) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 223 |
"""simple docstring"""
import math
class Graph:
"""simple docstring"""
    def __init__(self , n=0 ):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self , u , v , w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall(self ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min(self , u , v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 223 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 86 |
def abbr( a : str , b : str ):
'''simple docstring'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
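    # dp[i][j]: can a[:i] be abbreviated to b[:j]? Capitalize a matching lowercase letter (advance both) or delete a lowercase letter (advance i only).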
    for i in range(n ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
if a[i].islower():
                    dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.31_4462  # Unit - J mol-1 K-1
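# Ideal gas law: P * V = n * R * T, so P = nRT / V and V = nRT / P.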
def pressure_of_gas_system( moles: float , kelvin: float , volume: float ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles: float , kelvin: float , pressure: float ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 285 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_input_output_texts(self , tokenizer):
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text
    def get_clean_sequence(self , tokenizer):
        input_text , output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text , add_special_tokens=False)
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""")
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""")
        self.assertIsNotNone(tokenizer)
        text = """こんにちは、世界。\nこんばんは、世界。"""
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
        filename = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(filename , """wb""") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , """rb""") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="""unidic_lite""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="""unidic""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True , normalize_text=False , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""")
        self.assertIsNotNone(tokenizer)
        text = """こんにちは、世界。\nこんばんは、世界。"""
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
        filename = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(filename , """wb""") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , """rb""") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""")
        self.assertIsNotNone(tokenizer)
        text = """こんにちは、世界。\nこんばんは、世界。"""
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
        filename = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(filename , """wb""") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , """rb""") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
        self.assertListEqual(tokens , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
        tokens = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
        self.assertListEqual(tokens , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
        text = tokenizer.encode("""ありがとう。""" , add_special_tokens=False)
        text_a = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_tokenizer(self , **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **kwargs)
    def get_input_output_texts(self , tokenizer):
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
        tokens = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
        self.assertListEqual(
            tokens , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
    def test_character_tokenizer(self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
        text = tokenizer.encode("""ありがとう。""" , add_special_tokens=False)
        text_a = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase ):
    """simple docstring"""
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = """cl-tohoku/bert-base-japanese"""
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase ):
    """simple docstring"""
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = """cl-tohoku/bert-base-japanese"""
        with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from."""))
        EXAMPLE_BERT_ID = """bert-base-cased"""
        with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from."""))
| 155 | 0 |
import sys
def matrix_chain_order( array ):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
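    # matrix[a][b] = minimum scalar multiplications needed for the chain A_a..A_b; sol[a][b] = the optimal split point.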
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution( optimal_solution , i , j ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main():
    '''simple docstring'''
    array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
| 704 |
from __future__ import annotations
import numpy as np
def relu( vector: list[float] ):
'''simple docstring'''
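    # ReLU activation: elementwise max(0, x); negative entries clamp to zero.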
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 166 | 0 |
import pprint
import requests
_A = "https://zenquotes.io/api"
def quote_of_the_day():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 290 |
from math import log2
def lowerCamelCase__ ( a : int ):
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
raise TypeError("Input value must be a 'int' type" )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image: Union[List, PIL.Image.Image, torch.Tensor] ):
    """simple docstring"""
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask( mask: Union[List, PIL.Image.Image, torch.Tensor] ):
    """simple docstring"""
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline(DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
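        # RePaint alternates ordinary denoising steps with "undo" steps that jump back in time (jump_length / jump_n_sample), repeatedly re-harmonizing the known and inpainted regions.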
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 705 |
def create_ngram( sentence : str , ngram_size : int ) -> list[str]:
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(A__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 380 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 322 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Calculate the equated monthly installment (EMI): the fixed monthly payment
    that repays the principal plus interest over the given term.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
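
# A quick sanity check with illustrative numbers (not taken from this file):
# a principal of 25,000 at 8% per annum over 3 years gives rate_per_month = 0.08 / 12
# and 36 payments, so equated_monthly_installments(25000, 0.08, 3) is roughly 783.41.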
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 322 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
UpperCAmelCase__ : int = "bart"
UpperCAmelCase__ : int = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
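
# Note on the two retrieval paths above: "dense" scores passages by
# max-inner-product between RetriBERT question/passage embeddings through the
# faiss index, while the non-dense path falls back to an Elasticsearch query
# over the same wiki40b snippets.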
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ : List[Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ : Optional[int] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
UpperCAmelCase__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ : int = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : List[str] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ : Tuple = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ : Tuple = "wiki40b"
UpperCAmelCase__ : Any = "dense"
UpperCAmelCase__ : Optional[int] = "beam"
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Optional[Any] = 64
UpperCAmelCase__ : int = 2_56
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ : List[Any] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Tuple = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ : str = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Tuple = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : int = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : str = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ : int = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : Union[str, Any] = support_list[:10]
UpperCAmelCase__ : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ : Any = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : Optional[int] = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ : Optional[int] = sec_titles.split(" & ")
UpperCAmelCase__ : Tuple = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
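
# Note on the pattern above (the general transformers convention, not specific to
# CLIP): replacing the module in sys.modules with a _LazyModule makes the import
# itself cheap; the heavy torch/tf/flax submodules are only loaded when a name
# such as `CLIPModel` is first accessed, while the TYPE_CHECKING branch keeps
# static analyzers and IDEs aware of the full public surface.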
| 676 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output of the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size=65536,
        sample_rate=None,
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample, timestep, return_dict=True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
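
# A minimal shape check for the model above (a sketch; the 256-sample length and
# batch size are illustrative assumptions, not values taken from this file):
#
#   model = UNet1DModel()
#   sample = torch.randn(1, 2, 256)
#   out = model(sample, timestep=10).sample  # expected to match the input shape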
| 27 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Find where `function` becomes 0 in [a, b] using the bisection method.
    """
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
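
# For reference: the cubic f(x) = x**3 - 2*x - 5 used above is Wallis's classic
# example; its real root is x ~= 2.0945515, so the bisection call should print a
# value within the 1e-7 tolerance of that.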
| 27 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
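
# For example: with the default scale_factor of 8, a requested 512x512 output maps
# to a (64, 64) latent grid (512 // 8**2 = 8, times 8 again), and sizes that are
# not an exact multiple of 64 are rounded up to the next latent cell.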
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Pipeline for image generation using Kandinsky 2.2 with ControlNet conditioning."""

    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 370 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        # fmt: off
        expected_encoding = {
"input_ids": [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 370 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 387 |
import os


def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
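
# Worked example from the Project Euler 22 statement: COLIN, worth
# 3 + 15 + 12 + 9 + 14 = 53, is the 938th name once the list is sorted,
# so it contributes 938 * 53 = 49714 to the total.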
| 307 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 716 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Any = VOCAB_FILES_NAMES
__A : Dict = PRETRAINED_VOCAB_FILES_MAP
__A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["input_ids", "attention_mask"]
__A : List[int] = []
def __init__( self , __A , __A="<unk>" , __A="<s>" , __A="</s>" , __A="<pad>" , __A="[SEP]" , __A="[MASK]" , __A="[CLS]" , __A = None , **__A , ):
"""simple docstring"""
lowerCamelCase : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
lowerCamelCase : Union[str, Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
lowerCamelCase : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
lowerCamelCase : Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
lowerCamelCase : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
lowerCamelCase : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
lowerCamelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sep_token=__A , mask_token=__A , cls_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowerCamelCase : int = vocab_file
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
@property
def _snake_case ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.__dict__.copy()
lowerCamelCase : Tuple = None
return state
def __setstate__( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Dict = {}
lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.sp_model.encode(__A , out_type=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.sp_model.piece_to_id(__A )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = self.sp_model.IdToPiece(__A )
return token
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : str = []
lowerCamelCase : str = ""
lowerCamelCase : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Tuple = []
else:
current_sub_tokens.append(__A )
lowerCamelCase : int = False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def _snake_case ( self , __A , __A = False , __A = None , __A = True , **__A , ):
"""simple docstring"""
lowerCamelCase : str = kwargs.pop("use_source_tokenizer" , __A )
lowerCamelCase : Any = self.convert_ids_to_tokens(__A , skip_special_tokens=__A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : List[Any] = []
lowerCamelCase : Dict = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__A ) )
lowerCamelCase : Union[str, Any] = []
sub_texts.append(__A )
else:
current_sub_text.append(__A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCamelCase : Tuple = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(__A ) )
else:
lowerCamelCase : Optional[int] = "".join(__A )
lowerCamelCase : List[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : Any = self.clean_up_tokenization(__A )
return clean_text
else:
return text
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase : List[str] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
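# A minimal sketch (not part of the original file) of the special-token layout the
# three methods above produce; the ids below are hypothetical placeholders, real
# values come from the trained vocabulary.
if __name__ == "__main__":
    cls_id, sep_id = 2, 3  # hypothetical special-token ids
    seq_a, seq_b = [10, 11], [20, 21, 22]  # already-converted token ids
    # build_inputs_with_special_tokens pattern: [CLS] A [SEP] B [SEP]
    ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
    # create_token_type_ids_from_sequences: 0s cover [CLS] A [SEP], 1s cover B [SEP]
    type_ids = len([cls_id] + seq_a + [sep_id]) * [0] + len(seq_b + [sep_id]) * [1]
    assert len(ids) == len(type_ids) == 8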
| 231 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func: str , a: float | Decimal , precision: float = 1_0**-1_0 ) -> float:
    """Finds the root of func (an expression in x) via the Newton-Raphson method."""
    # eval() evaluates `func` with the current estimate bound to the name x
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find root of log(x) - 1 = 0 (the root is Euler's number e)
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
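    # A hand-computed sketch (not in the original file) of the update the loop in
    # newton_raphson performs: x1 = x0 - f(x0) / f'(x0).
    # For f(x) = x**2 - 4 at x0 = 3: f(3) = 5, f'(3) = 6, so x1 = 3 - 5/6 ≈ 2.1667.
    one_step = 3.0 - (3.0**2 - 4) / (2 * 3.0)
    assert abs(one_step - 2.1666666666666665) < 1e-12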
| 589 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
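# Hedged illustration (not part of the original file) of the lazy-import pattern
# above: the module in sys.modules is swapped for a placeholder that performs the
# real import only on first attribute access. `DemoLazy` is a hypothetical,
# simplified stand-in for _LazyModule.
import importlib
import types


class DemoLazy(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # attribute name -> submodule path

    def __getattr__(self, attr):
        # Called only when normal lookup fails, i.e. on first access.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)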
| 406 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _SCREAMING_SNAKE_CASE ( ChunkPipeline ):
def __init__( self : Tuple , **a__ : Tuple ):
super().__init__(**a__ )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
def __call__( self : List[Any] , a__ : Union[str, "Image.Image", List[Dict[str, Any]]] , a__ : Union[str, List[str]] = None , **a__ : Optional[int] , ):
if "text_queries" in kwargs:
__magic_name__ = kwargs.pop('''text_queries''' )
if isinstance(a__ , (str, Image.Image) ):
__magic_name__ = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__magic_name__ = image
__magic_name__ = super().__call__(a__ , **a__ )
return results
def snake_case__ ( self : Tuple , **a__ : Optional[Any] ):
__magic_name__ = {}
if "threshold" in kwargs:
__magic_name__ = kwargs['''threshold''']
if "top_k" in kwargs:
__magic_name__ = kwargs['''top_k''']
return {}, {}, postprocess_params
def snake_case__ ( self : Tuple , a__ : Tuple ):
__magic_name__ = load_image(inputs['''image'''] )
__magic_name__ = inputs['''candidate_labels''']
if isinstance(a__ , a__ ):
__magic_name__ = candidate_labels.split(''',''' )
__magic_name__ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(a__ ):
__magic_name__ = self.tokenizer(a__ , return_tensors=self.framework )
__magic_name__ = self.image_processor(a__ , return_tensors=self.framework )
yield {
"is_last": i == len(a__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def snake_case__ ( self : int , a__ : Optional[int] ):
__magic_name__ = model_inputs.pop('''target_size''' )
__magic_name__ = model_inputs.pop('''candidate_label''' )
__magic_name__ = model_inputs.pop('''is_last''' )
__magic_name__ = self.model(**a__ )
__magic_name__ = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def snake_case__ ( self : Optional[Any] , a__ : Dict , a__ : Any=0.1 , a__ : str=None ):
__magic_name__ = []
for model_output in model_outputs:
__magic_name__ = model_output['''candidate_label''']
__magic_name__ = BaseModelOutput(a__ )
__magic_name__ = self.image_processor.post_process_object_detection(
outputs=a__ , threshold=a__ , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
__magic_name__ = outputs['''scores'''][index].item()
__magic_name__ = self._get_bounding_box(outputs['''boxes'''][index][0] )
__magic_name__ = {'''score''': score, '''label''': label, '''box''': box}
results.append(a__ )
__magic_name__ = sorted(a__ , key=lambda a__ : x["score"] , reverse=a__ )
if top_k:
__magic_name__ = results[:top_k]
return results
def snake_case__ ( self : List[Any] , a__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = box.int().tolist()
__magic_name__ = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
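# Usage sketch (not part of the original file), assuming the standard `pipeline`
# factory exposes this class under the "zero-shot-object-detection" task; the
# OWL-ViT checkpoint below is the usual example model.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
        threshold=0.1,
    )
    # Each entry mirrors postprocess above: {"score": ..., "label": ..., "box": ...}
    print(predictions)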
| 245 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def UpperCamelCase ( args ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowerCAmelCase = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class _SCREAMING_SNAKE_CASE ( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=UpperCamelCase )
    def __init__( self , model_type : str , tf_checkpoint : str , pytorch_dump_output : str , config : str , finetuning_task_name : str , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(F'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT , self._config , self._pytorch_dump_output , TF_DATASET_FILE )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
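# Example invocation (sketch; paths are placeholders) of the command this class backs:
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_model.bin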
| 245 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
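# A minimal sketch (not part of the original tests) of the padding-mask rule used
# above: positions equal to pad_token_id become 0, all other positions become 1.
def _demo_padding_mask():
    pad_token_id = 0
    demo_ids = tf.constant([[5, 6, pad_token_id, pad_token_id]])
    mask = tf.cast(tf.math.not_equal(demo_ids, pad_token_id), tf.int8)
    assert mask.numpy().tolist() == [[1, 1, 0, 0]]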
@require_tf
class TFBlenderbotSmallModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbotSmall90MIntegrationTests ( unittest.TestCase ):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer( self ):
        """simple docstring"""
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
    @cached_property
    def model( self ):
        """simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_90_generation_from_long_input( self ):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 439 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    @property
    def dummy_cond_unet_upscale( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
        return model
    @property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
        return CLIPTextModel(config )
def snake_case_ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='v_prediction' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(lowercase__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=lowercase__ , low_res_scheduler=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = torch.Generator(device=lowercase__ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=lowercase__ , generator=lowercase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
_lowerCamelCase = output.images
_lowerCamelCase = torch.Generator(device=lowercase__ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=lowercase__ , generator=lowercase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=lowercase__ , )[0]
_lowerCamelCase = image[0, -3:, -3:, -1]
_lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
_lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCamelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='v_prediction' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(lowercase__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=lowercase__ , low_res_scheduler=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
_lowerCamelCase = output.images
assert image.shape[0] == 2
_lowerCamelCase = torch.Generator(device=lowercase__ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=lowercase__ , generator=lowercase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
_lowerCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def snake_case_ ( self ):
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='v_prediction' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(lowercase__ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCamelCase = unet.half()
_lowerCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=lowercase__ , low_res_scheduler=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=lowercase__ , generator=lowercase__ , num_inference_steps=2 , output_type='np' , ).images
_lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def snake_case_ ( self ):
_lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
_lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(lowercase__ )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=lowercase__ , image=lowercase__ , generator=lowercase__ , output_type='np' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def snake_case_ ( self ):
_lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
_lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowercase__ , torch_dtype=torch.floataa , )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=lowercase__ , image=lowercase__ , generator=lowercase__ , output_type='np' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def snake_case_ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowercase__ , torch_dtype=torch.floataa , )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=lowercase__ , image=lowercase__ , generator=lowercase__ , num_inference_steps=5 , output_type='np' , )
_lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
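# Usage sketch (not part of the original tests): the x4 upscaler takes a prompt plus
# a low-resolution PIL image and returns an image four times larger per side. The
# checkpoint name matches the one exercised by the slow tests above; the local
# image path is a placeholder.
if __name__ == "__main__":
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = Image.open("low_res_cat.png").convert("RGB")  # placeholder local file
    upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
    assert upscaled.size == (low_res.size[0] * 4, low_res.size[1] * 4)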
| 703 |
"""simple docstring"""
def factorial( digit: int )-> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number: int )-> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
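# Quick sanity check (not in the original file): 145 is a Krishnamurthy number
# because 1! + 4! + 5! = 1 + 24 + 120 = 145, while 144 is not.
assert krishnamurthy(145) and not krishnamurthy(144)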
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
A_ : Dict =int(input("""Enter number: """).strip())
print(
f'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 222 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance( x: float , y: float , max_step: int ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width: int = 8_0_0 , image_height: int = 6_0_0 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 5_0 , use_distance_color_coding: bool = True , ) -> Image.Image:
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_A : Optional[int] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
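    # Sanity sketch (not part of the original file): the origin never escapes, so
    # its normalized distance is exactly 1; a point far outside escapes at step 0.
    assert get_distance(0.0, 0.0, 50) == 1.0
    assert get_distance(4.0, 4.0, 50) == 0.0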
| 100 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
SCREAMING_SNAKE_CASE__ = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention( idx , cnt ):
SCREAMING_SNAKE_CASE__ = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token( idx ):
SCREAMING_SNAKE_CASE__ = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def final():
SCREAMING_SNAKE_CASE__ = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_0_0_0
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_A : int = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
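    # Example invocation (sketch; the script filename and paths are illustrative):
    #
    #   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
    #       --cvt_model cvt-w24 \
    #       --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24-384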
| 100 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig( PretrainedConfig ):
    model_type = """longformer"""
    def __init__( self , attention_window = 5_1_2 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_0_5_2_2 , hidden_size = 7_6_8 , num_hidden_layers = 1_2 , num_attention_heads = 1_2 , intermediate_size = 3_0_7_2 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_1_2 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-1_2 , onnx_export = False , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    def __init__( self , config : "PretrainedConfig" , task : str = "default" , patching_specs : "List[PatchingSpec]" = None ):
        """simple docstring"""
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ):
"""simple docstring"""
if self.task == "multiple-choice":
__lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs( self ):
        """simple docstring"""
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
return outputs
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-4
@property
    def default_onnx_opset( self ):
"""simple docstring"""
return max(super().default_onnx_opset , 1_4 )
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """simple docstring"""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
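# A minimal sketch (not part of the original file) of the "every second token is
# global" mask built by generate_dummy_inputs above.
def _demo_global_attention_mask():
    import torch

    input_ids = torch.ones(2, 8, dtype=torch.long)
    global_attention_mask = torch.zeros_like(input_ids)
    global_attention_mask[:, ::2] = 1  # tokens 0, 2, 4, ... attend globally
    assert global_attention_mask[0].tolist() == [1, 0, 1, 0, 1, 0, 1, 0]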
| 702 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=2_4 , num_mel_bins=2_4 , padding_value=0.0 , sampling_rate=1_6_0_0_0 , return_attention_mask=True , do_normalize=True , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class a__ ( snake_case__ , unittest.TestCase ):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        """simple docstring"""
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase = feature_extractor(_A , padding=_A , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
__lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
__lowerCAmelCase = feature_extractor(_A , return_tensors="np" ).input_features
__lowerCAmelCase = feature_extractor(_A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__lowerCAmelCase = np.asarray(_A )
__lowerCAmelCase = feature_extractor(_A , return_tensors="np" ).input_features
__lowerCAmelCase = feature_extractor(_A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase = [None, 1_6, None]
for max_length, padding in zip(_A , _A ):
__lowerCAmelCase = feature_extractor(
_A , padding=_A , max_length=_A , return_attention_mask=_A )
__lowerCAmelCase = inputs.input_features
__lowerCAmelCase = inputs.attention_mask
__lowerCAmelCase = [np.sum(_A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase = [None, 1_6, None]
for max_length, padding in zip(_A , _A ):
__lowerCAmelCase = feature_extractor(
_A , max_length=_A , padding=_A , return_tensors="np" , return_attention_mask=_A )
__lowerCAmelCase = inputs.input_features
__lowerCAmelCase = inputs.attention_mask
__lowerCAmelCase = [np.sum(_A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_trunc_max_length( self ):
"""simple docstring"""
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = feature_extractor(
_A , padding="max_length" , max_length=4 , truncation=_A , return_tensors="np" , return_attention_mask=_A , )
__lowerCAmelCase = inputs.input_features
__lowerCAmelCase = inputs.attention_mask
__lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
    def test_cepstral_mean_and_variance_normalization_trunc_longest( self ):
"""simple docstring"""
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = feature_extractor(
_A , padding="longest" , max_length=4 , truncation=_A , return_tensors="np" , return_attention_mask=_A , )
__lowerCAmelCase = inputs.input_features
__lowerCAmelCase = inputs.attention_mask
__lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4) )
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = feature_extractor(
_A , padding="longest" , max_length=1_6 , truncation=_A , return_tensors="np" , return_attention_mask=_A , )
__lowerCAmelCase = inputs.input_features
__lowerCAmelCase = inputs.attention_mask
__lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 2_4) )
    def test_double_precision_pad( self ):
        """simple docstring"""
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
"""simple docstring"""
from datasets import load_dataset
__lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
    def test_integration( self ):
        """simple docstring"""
        # fmt: off
        expected = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        input_features = feature_extractor(input_speech , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 5_8_4, 2_4) )
        self.assertTrue(np.allclose(input_features[0, 0, :3_0] , expected , atol=1E-4 ) )
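# For context, the cepstral mean and variance normalization exercised above
# boils down to standardizing each utterance over the time axis, with padded
# frames excluded via the attention mask. A minimal sketch under that
# assumption (the library applies this internally; details may differ):
def _cmvn_sketch(features, attention_mask):
    # features: (time, feature_size); attention_mask: (time,), 1 for real frames
    valid = features[attention_mask == 1]
    normed = (valid - valid.mean(axis=0)) / np.sqrt(valid.var(axis=0) + 1e-7)
    padded = np.zeros_like(features)
    padded[: len(normed)] = normed  # padded tail stays zero, as the tests assert
    return padded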
| 552 | 0 |
import argparse
import os
import re
_a = "src/diffusers"
# Pattern that looks at the indentation in a line.
_a = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_a = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_a = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_a = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_a = re.compile(r"\[([^\]]+)\]")
def get_indent(line ):
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def lowerCAmelCase__(__snake_case ,__snake_case="" ,__snake_case=None ,__snake_case=None ) -> int:
'''simple docstring'''
lowerCamelCase__ = 0
lowerCamelCase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
lowerCamelCase__ = ['''\n'''.join(lines[:index] )]
else:
lowerCamelCase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase__ = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(__snake_case ) )
if index < len(__snake_case ) - 1:
lowerCamelCase__ = [lines[index + 1]]
index += 1
else:
lowerCamelCase__ = []
else:
blocks.append('''\n'''.join(__snake_case ) )
lowerCamelCase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('''\n'''.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key ):
    """Wrap `key` so that underscores are ignored while sorting."""
    def _inner(x ):
        return key(x ).lower().replace('''_''' ,'''''' )
    return _inner
def sort_objects(objects ,key=None ):
    """Sort `objects`: constants first, then classes, then functions, each alphabetically."""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key = ignore_underscore(key )
    return sorted(constants ,key=key ) + sorted(classes ,key=key ) + sorted(functions ,key=key )
def sort_objects_in_import(import_statement ):
    """Sort the objects listed in one import statement, preserving its layout."""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'[{imports}]'
        keys = [part.strip().replace('''"''' ,'''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort ,key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace ,lines[1] )
        else:
            keys = [part.strip().replace('''"''' ,'''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace ,import_statement )
        return import_statement
def sort_imports(file ,check_only=True ):
    """Sort the `_import_structure` blocks of one __init__.py; return True if changes are needed."""
    with open(file ,'''r''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code ,start_prompt='''_import_structure = {''' ,end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 ,len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code ,indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort ,key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(keys ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'Overwriting {file}.' )
            with open(file ,'''w''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    """Run `sort_imports` on every __init__.py under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root ,'''__init__.py''' ) ,check_only=check_only )
            if result:
                failures += [os.path.join(root ,'''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
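# A quick illustration of the ordering `sort_objects` produces — constants
# first, then classes, then functions, underscores ignored while sorting.
# This demo helper is illustrative only and not part of the original script:
def _sort_objects_demo():
    demo = ["zeta", "CONFIG", "Beta", "ALPHA", "delta"]
    # -> ["ALPHA", "CONFIG", "Beta", "delta", "zeta"]
    return sort_objects(demo)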
| 481 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time( process_name : list , arrival_time : list , burst_time : list , no_of_process : int ):
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each process: 0 while it is still waiting, 1 once it has run.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time( process_name : list , turn_around_time : list , burst_time : list , no_of_process : int ):
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
__snake_case =5
__snake_case =["""A""", """B""", """C""", """D""", """E"""]
__snake_case =[1, 2, 3, 4, 5]
__snake_case =[1, 2, 3, 4, 5]
__snake_case =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__snake_case =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
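# Worked example of the response-ratio formula used above: a process that has
# waited 4 time units and needs 2 more has ratio (4 + 2) / 2 = 3.0, so HRRN
# prefers it over one that has waited 1 unit and also needs 2, whose ratio is
# (1 + 2) / 2 = 1.5. Longer waits and shorter bursts both raise the ratio,
# which is how the policy balances throughput with fairness. A tiny check:
def _response_ratio_example():
    def ratio(waited, burst):
        return (waited + burst) / burst
    assert ratio(4, 2) == 3.0 and ratio(1, 2) == 1.5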
| 133 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def replace_key_with_offset( key , offset , original_name , new_name ):
    '''Replaces the key by subtracting the offset from the original block number.'''
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
def rename_keys( state_dict ):
    '''Rename original PoolFormer checkpoint keys to the HuggingFace layout.'''
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , F'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    '''Load the standard COCO test image used to sanity-check the conversion.'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
'''simple docstring'''
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 10_00
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 10_00)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "s12":
SCREAMING_SNAKE_CASE__ = [2, 2, 6, 2]
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s24":
SCREAMING_SNAKE_CASE__ = [4, 4, 12, 4]
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "m36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [96, 1_92, 3_84, 7_68]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
elif size == "m48":
SCREAMING_SNAKE_CASE__ = [8, 8, 24, 8]
SCREAMING_SNAKE_CASE__ = [96, 1_92, 3_84, 7_68]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
SCREAMING_SNAKE_CASE__ = PoolFormerImageProcessor(crop_pct=_A )
# Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(F'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# define expected logit slices for different models
if size == "s12":
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
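# A quick sanity sketch for the renaming helper above, using a hypothetical
# checkpoint key (illustrative only, not taken from a real checkpoint):
def _replace_key_example():
    renamed = replace_key_with_offset("poolformer.encoder.2.1.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
    assert renamed == "poolformer.encoder.block.2.1.output.conv1.weight"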
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
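# For context: with this layout, `import transformers.models.roberta` stays
# cheap, and heavy submodules load only when an attribute is first touched.
# A minimal sketch of the deferral idea (illustrative; the real _LazyModule in
# transformers.utils handles much more, e.g. TYPE_CHECKING and dir()):
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)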
| 472 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(__magic_name__ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    '''simple docstring'''
    out = _distribute_shards(**kwargs )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    '''simple docstring'''
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    '''simple docstring'''
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
assert out == expected
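# The parametrized cases above pin down the contract of `_distribute_shards`:
# split `num_shards` shard indices into at most `max_num_jobs` contiguous
# ranges, dropping empty groups. A minimal reimplementation sketch consistent
# with those cases (the library's actual code may differ in details):
def _distribute_shards_sketch(num_shards, max_num_jobs):
    groups = []
    for group_idx in range(max_num_jobs):
        # Earlier groups absorb the remainder, one extra shard each.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_shards_to_add == 0:
            break
        start = sum(len(g) for g in groups)
        groups.append(range(start, start + num_shards_to_add))
    return groups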
| 38 |
'''simple docstring'''
import argparse
import os
import re
__lowerCAmelCase = "src/diffusers"
# Pattern that looks at the indentation in a line.
__lowerCAmelCase = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowerCAmelCase = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowerCAmelCase = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowerCAmelCase = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowerCAmelCase = re.compile(R"\[([^\]]+)\]")
def get_indent( line ):
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : int="" , lowercase_ : Union[str, Any]=None , lowercase_ : str=None ):
"""simple docstring"""
a_ = 0
a_ = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
a_ = ['\n'.join(lines[:index] )]
else:
a_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
a_ = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
a_ = [lines[index + 1]]
index += 1
else:
a_ = []
else:
blocks.append('\n'.join(lowercase_ ) )
a_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append('\n'.join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore( key ):
    """Wrap `key` so that underscores are ignored while sorting."""
    def _inner(x ):
        return key(x ).lower().replace('_' , '' )
    return _inner
def sort_objects( objects , key=None ):
    """Sort `objects`: constants first, then classes, then functions, each alphabetically."""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key = ignore_underscore(key )
    return sorted(constants , key=key ) + sorted(classes , key=key ) + sorted(functions , key=key )
def sort_objects_in_import( import_statement ):
    """Sort the objects listed in one import statement, preserving its layout."""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'[{imports}]'
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([F'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports( file , check_only=True ):
    """Sort the `_import_structure` blocks of one __init__.py; return True if changes are needed."""
    with open(file , 'r' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(keys ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'Overwriting {file}.' )
            with open(file , 'w' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ):
    """Run `sort_imports` on every __init__.py under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures += [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(F'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
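# A small illustration of how `split_code_in_indented_blocks` above carves an
# __init__.py into sortable chunks (demo helper only; not part of the script):
def _split_blocks_demo():
    code = 'import os\n_import_structure = {\n    "a": ["A"],\n}\nif TYPE_CHECKING:\n    pass'
    blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # blocks[0] is everything before the start prompt; the following blocks are
    # the indentation-level-0 chunks that the sorter then reorders.
    return blocks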
| 536 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
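# For reference ahead of the local-binary-pattern test at the bottom of this
# file: the value computed per pixel sums one bit per neighbour, set when the
# neighbour is at least as bright as the centre (assuming the usual
# power-of-two weighting; the module's own implementation may differ):
def _local_binary_value_sketch(neighbors, center):
    return sum(int(neighbor >= center) << power for power, neighbor in enumerate(neighbors))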
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path="digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 709 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 30}
        crop_size = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
"""simple docstring"""
_UpperCamelCase :Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_UpperCamelCase :Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil( self ):
"""simple docstring"""
_UpperCamelCase :Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase :List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase :int =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Optional[Any] =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy( self ):
"""simple docstring"""
_UpperCamelCase :Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase :int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase :List[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Tuple =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
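    def _processor_usage_sketch( self , some_pil_image ):
        # Typical call pattern these tests exercise (a sketch; the size and
        # crop_size values mirror the tester defaults, not library defaults):
        processor = PoolFormerImageProcessor(size={"""shortest_edge""": 30} , crop_size={"""height""": 30, """width""": 30} )
        # Returns a batch of shape (1, 3, 30, 30) for one RGB input image.
        return processor(images=some_pil_image , return_tensors="""pt""" ).pixel_values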
    def test_call_pytorch( self ):
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase :Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase :Dict =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Tuple =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
            ) , )
| 512 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search (possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    """simple docstring"""
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution (n: int ) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
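# Worked example of the two diagonal formulas used above: queens at (row=1,
# col=3) and (row=3, col=5) collide on a 45° diagonal because row - col is -2
# for both; queens at (0, 2) and (2, 0) collide on a 135° diagonal because
# row + col is 2 for both. A tiny check:
def _diagonal_example():
    assert 1 - 3 == 3 - 5
    assert 0 + 2 == 2 + 0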
| 501 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting( DiffusionPipeline ):
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        """simple docstring"""
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
"""simple docstring"""
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
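# Sketch of how a pipeline like this is typically called once all components
# are loaded (the calling convention mirrors __call__ above; the `.images`
# output attribute mirrors StableDiffusionInpaintPipeline's):
def _text_inpainting_usage_sketch(pipe, init_image):
    # `pipe` is assumed to be a fully initialized instance of the class above.
    # The CLIPSeg mask is produced from `text`; `prompt` drives the inpainting.
    return pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]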
| 501 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "beit"
    def __init__( self , vocab_size=81_92 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_55 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = version.parse("1.11" )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return 1E-4
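# Hedged sanity check for the two classes above. In transformers they are
# exported as `BeitConfig` and `BeitOnnxConfig` (the mangled names here stand
# in for them); the import path below is an assumption based on that layout.
from transformers import BeitConfig
from transformers.models.beit.configuration_beit import BeitOnnxConfig

config = BeitConfig(image_size=224, patch_size=16)
onnx_config = BeitOnnxConfig(config)
print(config.model_type)                # "beit"
print(list(onnx_config.inputs))         # ["pixel_values"]
print(onnx_config.atol_for_validation)  # 1e-4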
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__UpperCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__UpperCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k: str, patterns: list):
"""Apply the (tf_name, hf_name) replacement patterns to a checkpoint key."""
for tf_name, hf_name in patterns:
k = k.replace(tf_name, hf_name)
return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
"""Map a dict of TF checkpoint weights onto a freshly initialised PyTorch model."""
cfg = BigBirdPegasusConfig(**config_update)
torch_model = BigBirdPegasusForConditionalGeneration(cfg)
state_dict = torch_model.state_dict()
mapping = {}
# separating decoder weights
decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
if any(k.endswith(ending) for ending in KEYS_TO_IGNORE):
continue
patterns = DECODER_PATTERNS
new_k = rename_state_dict_key(k, patterns)
if new_k not in state_dict:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
if any(i in k for i in ["dense", "query", "key", "value"]):
v = v.T
mapping[new_k] = torch.from_numpy(v)
assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
if any(k.endswith(ending) for ending in KEYS_TO_IGNORE):
continue
patterns = REMAINING_PATTERNS
new_k = rename_state_dict_key(k, patterns)
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
if any(i in k for i in ["dense", "query", "key", "value"]):
v = v.T
mapping[new_k] = torch.from_numpy(v)
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
# the TF checkpoint stores a single (shared) position embedding table
mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
missing, extra = torch_model.load_state_dict(mapping, strict=False)
unexpected_missing = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def get_tf_weights_as_numpy(path: str) -> dict:
"""Read every variable of a TF checkpoint into a {name: np.ndarray} dict."""
init_vars = tf.train.list_variables(path)
tf_weights = {}
ignore_name = ["global_step"]
for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
skip_key = any(pat in name for pat in ignore_name)
if skip_key:
continue
array = tf.train.load_variable(path, name)
tf_weights[name] = array
return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
tf_weights = get_tf_weights_as_numpy(ckpt_path)
torch_model = convert_bigbird_pegasus(tf_weights, config_update)
torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
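# Hedged usage sketch: the converter above can be driven through its CLI flags
# or programmatically. The checkpoint path is a placeholder; any TF
# BigBirdPegasus checkpoint readable by tf.train.list_variables should work.
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_ckpt \
#       --save_dir ./bigbird-pegasus-pt
#
# or, from Python:
#
#   convert_bigbird_pegasus_ckpt_to_pytorch("/path/to/tf_ckpt", "./bigbird-pegasus-pt", config_update={})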
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['transformers', 'torch', 'note_seq']
def __init__( self: Union[str, Any] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
requires_backends(self ,["transformers", "torch", "note_seq"] )
@classmethod
def _lowercase ( cls: Any ,*__lowerCAmelCase: str ,**__lowerCAmelCase: Dict ):
'''simple docstring'''
requires_backends(cls ,["transformers", "torch", "note_seq"] )
@classmethod
def _lowercase ( cls: Dict ,*__lowerCAmelCase: int ,**__lowerCAmelCase: str ):
'''simple docstring'''
requires_backends(cls ,["transformers", "torch", "note_seq"] )
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = self.prepare_config_and_inputs()
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = False
a__ = False
a__ = False
a__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
"""Load the standard COCO fixture image used by the integration test below."""
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self ):
"""Image processor without OCR, matching how the integration test feeds raw token ids."""
return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image, return_tensors="pt" ).pixel_values.to(torch_device )
input_ids = torch.tensor([[1, 2]] )
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
outputs = model(
input_ids=input_ids.to(torch_device ), bbox=bbox.to(torch_device ), pixel_values=pixel_values.to(torch_device ), )
# verify the logits
expected_shape = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape, expected_shape )
expected_slice = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4 ) )
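# Standalone sketch of the integration check above, using the public
# "microsoft/layoutlmv3-base" checkpoint. The token ids and boxes are the same
# toy values as in the test; the image path is a placeholder.
import torch
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Model

model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768]): 2 text tokens + 196 patches + CLS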
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A__ :
lowerCAmelCase__ : CommonSchedulerState
# setable values
lowerCAmelCase__ : jnp.ndarray
lowerCAmelCase__ : jnp.ndarray
lowerCAmelCase__ : Optional[int] = None
@classmethod
def a__ ( cls : List[str] , _UpperCAmelCase : CommonSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray ) -> Optional[int]:
"""simple docstring"""
return cls(common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase )
@dataclass
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : DDPMSchedulerState
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase__ : jnp.dtype
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
return True
@register_to_config
def __init__( self : Dict , _UpperCAmelCase : int = 10_00 , _UpperCAmelCase : float = 0.0_001 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[jnp.ndarray] = None , _UpperCAmelCase : str = "fixed_small" , _UpperCAmelCase : bool = True , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : jnp.dtype = jnp.floataa , ) -> Tuple:
"""simple docstring"""
__lowercase = dtype
def a__ ( self : Tuple , _UpperCAmelCase : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
__lowercase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__lowercase = jnp.array(1.0 , dtype=self.dtype )
__lowercase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase , )
def a__ ( self : List[str] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def a__ ( self : Optional[Any] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : int , _UpperCAmelCase : Tuple = () ) -> DDPMSchedulerState:
"""simple docstring"""
__lowercase = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__lowercase = (jnp.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase , )
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Dict=None ) -> Any:
"""simple docstring"""
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowercase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowercase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__lowercase = jnp.clip(_UpperCAmelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowercase = jnp.log(jnp.clip(_UpperCAmelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__lowercase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowercase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowercase = variance
__lowercase = state.common.betas[t]
__lowercase = (predicted_variance + 1) / 2
__lowercase = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Optional[jax.random.KeyArray] = None , _UpperCAmelCase : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__lowercase = timestep
if key is None:
__lowercase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowercase , __lowercase = jnp.split(_UpperCAmelCase , sample.shape[1] , axis=1 )
else:
__lowercase = None
# 1. compute alphas, betas
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__lowercase = 1 - alpha_prod_t
__lowercase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowercase = model_output
elif self.config.prediction_type == "v_prediction":
__lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowercase = jnp.clip(_UpperCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowercase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowercase = jax.random.split(_UpperCAmelCase , num=1 )
__lowercase = jax.random.normal(_UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_UpperCAmelCase , _UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
__lowercase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_UpperCAmelCase , state=_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __len__( self : int ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
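# Hedged usage sketch of one denoising step with the scheduler above (exported
# from diffusers as FlaxDDPMScheduler; the shapes and zero tensors are toy
# stand-ins for real UNet latents and predictions).
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
sample = jnp.zeros((1, 3, 32, 32))     # noisy latents (toy)
model_output = jnp.zeros_like(sample)  # stand-in for a UNet noise prediction
t = state.timesteps[0]
out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))
sample, state = out.prev_sample, out.state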
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
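# Hedged sketch (not from the original file): the class above corresponds to
# datasets' MockDownloadManager (the mangled names stand in for it); the import
# path below is an assumption and varies across datasets versions.
from datasets.download.mock_download_manager import MockDownloadManager

dl_manager = MockDownloadManager(
    dataset_name="xsum",  # placeholder dataset name
    config=None,
    version="1.0.0",
    use_local_dummy_data=True,
)
# URLs are never fetched: they are mapped to paths inside the local dummy_data.zip.
path = dl_manager.download_and_extract("https://example.com/data.json")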
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
"""Return the optimal score for the player to move, exploring a perfect binary tree of leaf `scores`."""
if depth < 0:
raise ValueError("Depth cannot be less than 0")
if not scores:
raise ValueError("Scores cannot be empty")
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, False, scores, height),
minimax(depth + 1, node_index * 2 + 1, False, scores, height),
)
if is_max
else min(
minimax(depth + 1, node_index * 2, True, scores, height),
minimax(depth + 1, node_index * 2 + 1, True, scores, height),
)
)
def main() -> None:
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)
print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
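# Worked example for `minimax` above: with four leaf scores the tree height is
# log2(4) = 2, and the maximizer picks the better of the two minimizer
# subtrees: max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3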
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase_ = logging.getLogger(__name__)
def snake_case( __magic_name__=2 , __magic_name__=3 , __magic_name__=16 , __magic_name__ = 10 , __magic_name__ = 2 ) -> List[Any]:
'''simple docstring'''
def get_dataset(__magic_name__ ):
lowercase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__magic_name__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
lowercase : Any = get_dataset(__magic_name__ )
lowercase : List[str] = get_dataset(__magic_name__ )
lowercase : Optional[Any] = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 )
lowercase : Union[str, Any] = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict = []
for epoch in range(__magic_name__ ):
# Train quickly
model.train()
for batch in dataloader:
lowercase , lowercase : Union[str, Any] = batch
lowercase : Optional[int] = model(__magic_name__ )
lowercase : str = torch.nn.functional.mse_loss(__magic_name__ , __magic_name__ )
accelerator.backward(__magic_name__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _A ( nn.Module ):
def __init__( self : Dict ) -> Any:
"""simple docstring"""
super().__init__()
lowercase : Optional[int] = nn.Parameter(torch.randn(1 ) )
lowercase : Optional[Any] = nn.Parameter(torch.randn(1 ) )
def __a ( self : Union[str, Any] , _A : Optional[int] ) -> List[Any]:
"""simple docstring"""
return x * self.a + self.b
class _A ( unittest.TestCase ):
def __a ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : Tuple = DummyModel()
lowercase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : List[str] = dummy_dataloaders()
lowercase : int = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A )
# Train baseline
lowercase : List[Any] = Accelerator(project_config=_A )
lowercase , lowercase , lowercase , lowercase : Dict = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : str = DummyModel()
lowercase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : Tuple = dummy_dataloaders()
# Train baseline
lowercase : Any = Accelerator()
lowercase , lowercase , lowercase , lowercase : Dict = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
lowercase : int = os.path.join(_A , '''initial''' )
accelerator.save_state(_A )
((lowercase) , (lowercase)) : Optional[int] = model.a.item(), model.b.item()
lowercase : Tuple = optimizer.state_dict()
lowercase : List[Any] = train(3 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : Optional[Any] = model.a.item(), model.b.item()
lowercase : Optional[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase : List[str] = DummyModel()
lowercase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : Any = dummy_dataloaders()
lowercase : Optional[int] = Accelerator()
lowercase , lowercase , lowercase , lowercase : Dict = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(_A )
((lowercase) , (lowercase)) : Any = model.a.item(), model.b.item()
lowercase : Optional[int] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
lowercase : List[Any] = train(2 , _A , _A , _A , _A )
# Save everything
lowercase : Any = os.path.join(_A , '''checkpoint''' )
accelerator.save_state(_A )
# Load everything back in and make sure all states work
accelerator.load_state(_A )
test_rands += train(1 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : int = model.a.item(), model.b.item()
lowercase : Any = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def __a ( self : Any ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : List[Any] = DummyModel()
lowercase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : List[Any] = dummy_dataloaders()
lowercase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
lowercase : List[str] = Accelerator(project_dir=_A , project_config=_A )
lowercase , lowercase , lowercase , lowercase : Optional[int] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
((lowercase) , (lowercase)) : Any = model.a.item(), model.b.item()
lowercase : Dict = optimizer.state_dict()
lowercase : List[Any] = train(3 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : Optional[Any] = model.a.item(), model.b.item()
lowercase : str = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase : Optional[int] = DummyModel()
lowercase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : Tuple = dummy_dataloaders()
lowercase : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A )
lowercase : Tuple = Accelerator(project_dir=_A , project_config=_A )
lowercase , lowercase , lowercase , lowercase : Tuple = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
((lowercase) , (lowercase)) : Optional[Any] = model.a.item(), model.b.item()
lowercase : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
lowercase : Tuple = train(2 , _A , _A , _A , _A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : str = model.a.item(), model.b.item()
lowercase : Optional[Any] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = torch.tensor([1, 2, 3] )
lowercase : Dict = torch.tensor([2, 3, 4] )
lowercase : Union[str, Any] = DummyModel()
lowercase : int = torch.optim.Adam(net.parameters() )
lowercase : str = Accelerator()
with self.assertRaises(_A ) as ve:
accelerator.register_for_checkpointing(_A , _A , _A , _A )
lowercase : Union[str, Any] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : Optional[int] = DummyModel()
lowercase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase : List[str] = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 )
lowercase , lowercase : int = dummy_dataloaders()
lowercase : Dict = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
lowercase : Any = Accelerator(project_dir=_A , project_config=_A )
lowercase , lowercase , lowercase , lowercase , lowercase : int = accelerator.prepare(
_A , _A , _A , _A , _A )
# Save initial
accelerator.save_state()
lowercase : int = scheduler.state_dict()
train(3 , _A , _A , _A , _A , _A )
self.assertNotEqual(_A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_A , scheduler.state_dict() )
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : List[Any] = DummyModel()
lowercase : str = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 )
# Train baseline
lowercase : Dict = Accelerator(project_dir=_A , project_config=_A )
lowercase : Optional[int] = accelerator.prepare(_A )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase : Optional[int] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase_ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase_ = DummyModel()
lowerCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
lowerCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
lowerCAmelCase_ , lowerCAmelCase_ = dummy_dataloaders()
lowerCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase_ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
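# Minimal standalone sketch of the save/load cycle these tests exercise.
# The project directory is a placeholder path; the tiny model is toy data.
import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
accelerator = Accelerator(
    project_dir="./state_ckpts",  # placeholder
    project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # writes ./state_ckpts/checkpoints/checkpoint_0
accelerator.load_state("./state_ckpts/checkpoints/checkpoint_0")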
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __lowercase ( __magic_name__ ):
_a = ["""input_features""", """attention_mask"""]
def __init__( self , UpperCamelCase=80 , UpperCamelCase=1_6000 , UpperCamelCase=80 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , **UpperCamelCase , ) -> int:
super().__init__(feature_size=UpperCamelCase , sampling_rate=UpperCamelCase , padding_value=UpperCamelCase , **UpperCamelCase )
__a = num_mel_bins
__a = do_ceptral_normalize
__a = normalize_means
__a = normalize_vars
__a = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
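
A hedged usage sketch (not part of the original file): assuming the methods above belong to transformers' Speech2TextFeatureExtractor, a typical call looks like the following; the checkpoint name and the random 16 kHz waveform are illustrative assumptions.

import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.random.randn(16000).astype(np.float32)  # one second of synthetic mono audio
inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs.input_features.shape)  # (batch, frames, num_mel_bins)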
| 490 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase(metaclass=DummyObject):
_a = ["""onnx"""]
def __init__( self , *UpperCamelCase , **UpperCamelCase ) -> str:
requires_backends(self , ['onnx'] )
@classmethod
def UpperCamelCase__ ( cls , *UpperCamelCase , **UpperCamelCase ) -> Tuple:
requires_backends(cls , ['onnx'] )
@classmethod
def UpperCamelCase__ ( cls , *UpperCamelCase , **UpperCamelCase ) -> Tuple:
requires_backends(cls , ['onnx'] )
| 490 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __A ( unittest.TestCase ):
def __init__( self :Optional[Any] , __snake_case :Dict , __snake_case :List[str]=7 , __snake_case :Union[str, Any]=3 , __snake_case :int=18 , __snake_case :str=30 , __snake_case :int=4_00 , __snake_case :Dict=None , __snake_case :Tuple=True , __snake_case :Any=True , __snake_case :Tuple=None , ):
'''simple docstring'''
__magic_name__ : str =size if size is not None else {"""height""": 20, """width""": 20}
__magic_name__ : Optional[Any] =parent
__magic_name__ : Optional[Any] =batch_size
__magic_name__ : Optional[int] =num_channels
__magic_name__ : List[Any] =image_size
__magic_name__ : Optional[Any] =min_resolution
__magic_name__ : str =max_resolution
__magic_name__ : List[Any] =size
__magic_name__ : Any =do_normalize
__magic_name__ : int =do_convert_rgb
__magic_name__ : Any =[5_12, 10_24, 20_48, 40_96]
__magic_name__ : List[Any] =patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def A__ ( self :Dict ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] ="""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
__magic_name__ : Any =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =PixaStructImageProcessingTester(self )
@property
def A__ ( self :str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(__snake_case , """do_convert_rgb""" ) )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.image_processor_tester.prepare_dummy_image()
__magic_name__ : Dict =self.image_processing_class(**self.image_processor_dict )
__magic_name__ : str =20_48
__magic_name__ : Tuple =image_processor(__snake_case , return_tensors="""pt""" , max_patches=__snake_case )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : Union[str, Any] =(
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Union[str, Any] =image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : List[str] =image_processor(
__snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : str =(
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
__magic_name__ : List[Any] =True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__snake_case ):
__magic_name__ : Optional[Any] =image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
__magic_name__ : Optional[int] ="""Hello"""
__magic_name__ : List[Any] =image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case , header_text=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : Optional[Any] =image_processor(
__snake_case , return_tensors="""pt""" , max_patches=__snake_case , header_text=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
__magic_name__ : List[Any] =(
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Optional[Any] =image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : str =image_processor(
__snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__magic_name__ : Union[str, Any] =(
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Tuple =image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : str =image_processor(
__snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Tuple =PixaStructImageProcessingTester(self , num_channels=4 )
__magic_name__ : Optional[int] =3
@property
def A__ ( self :Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(__snake_case , """do_convert_rgb""" ) )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : str =(
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Any =image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : List[Any] =image_processor(
__snake_case , return_tensors="""pt""" , max_patches=__snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
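
A quick sanity check (my own addition) of the expected_hidden_dim arithmetic used throughout the tests above: each flattened Pix2Struct patch stores patch_height * patch_width * num_channels pixel values plus two extra columns for the patch's row and column index.

patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
assert expected_hidden_dim == 770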
| 21 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
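
The _LazyModule machinery above defers the heavy framework imports until an attribute is first accessed. A minimal sketch of the same idea (my own illustration, not transformers' actual implementation) using PEP 562 module-level __getattr__:

import importlib

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}  # toy stand-in for the real mapping

def __getattr__(name):
    # resolve the owning submodule lazily on first access, then delegate the lookup
    for module_name, symbols in _IMPORT_STRUCTURE.items():
        if name in symbols:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")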
| 258 | 0 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
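
For readers without the repo-local graphs.minimum_spanning_tree_prims module, here is a minimal self-contained Prim's algorithm sketch (my own illustration, not the implementation under test) over the same adjacency format:

import heapq

def prim_mst(adjacency, start=0):
    """Return MST edges as (u, v) tuples for a connected, undirected weighted graph."""
    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]  # edges leaving the tree
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst_edges.append((u, v))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst_edges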
| 716 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ : int = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE__ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ :Any , ) -> BatchEncoding:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
# add pixel_values + pixel_mask
__SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(
lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_center_crop=lowerCAmelCase__ , **lowerCAmelCase__ )
encoding.update(lowerCAmelCase__ )
return encoding
def __magic_name__( self :Tuple , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :Any ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] , *lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.tokenizer.model_input_names
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
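
A hedged usage sketch: assuming the class above is transformers' BridgeTowerProcessor, a combined text-and-image call looks like this (the checkpoint name and the blank test image are illustrative assumptions):

from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.new("RGB", (224, 224))
inputs = processor(image, "a photo of a cat", return_tensors="pt")
print(sorted(inputs.keys()))  # token ids/mask from the tokenizer plus pixel values/mask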
| 260 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = "xlm-roberta-xl"
def __init__(self ,_lowerCamelCase=250880 ,_lowerCamelCase=2560 ,_lowerCamelCase=36 ,_lowerCamelCase=32 ,_lowerCamelCase=10240 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=514 ,_lowerCamelCase=1 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-0_5 ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase="absolute" ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
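
A hedged instantiation sketch for the config above: the class name XLMRobertaXLConfig matches transformers' naming for model_type "xlm-roberta-xl", but treat it as an assumption since the masked class name is not recoverable from this snippet.

from transformers import XLMRobertaXLConfig

config = XLMRobertaXLConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=8)
print(config.model_type)  # "xlm-roberta-xl"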
| 502 |
"""
Project Euler Problem 10: https://projecteuler.net/problem=10
Summation of primes: find the sum of all the primes below two million.
"""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Check if a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n.

    >>> solution(10)
    17
    """
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 502 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["image_processor", "tokenizer"]
A : str = "ViltImageProcessor"
A : Optional[Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : int , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : str):
"""simple docstring"""
a : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase_ , )
a : List[Any] = kwargs.pop('feature_extractor')
a : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[Any] = self.image_processor
def __call__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
# add pixel_values + pixel_mask
a : Optional[Any] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_)
encoding.update(UpperCAmelCase_)
return encoding
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = self.tokenizer.model_input_names
a : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase_ , )
return self.image_processor
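
A hedged usage sketch: assuming the class above is transformers' ViltProcessor (VQA-style image-plus-question preprocessing); the checkpoint name is an illustrative assumption.

from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.new("RGB", (384, 384))
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))  # input ids/mask from the tokenizer plus pixel values/mask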
| 700 |
"""
Gradient descent for fitting a linear hypothesis function to sample data.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Return the error (hypothesis value minus actual output) for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the target output of the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms (weighted by feature `index` unless index == -1)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Return the partial derivative of the cost w.r.t. parameter `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
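
A worked check of the hypothesis above with the initial parameter_vector [2, 4, 1, 5]: for the first training input (5, 2, 3), h(x) = 2 + 4*5 + 1*2 + 5*3 = 39, so the initial error on that example is 39 - 15 = 24; each gradient-descent iteration shrinks these errors.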
| 610 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 500 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'gptj'
lowerCamelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=5_0400, __a=2048, __a=4096, __a=28, __a=16, __a=64, __a=None, __a="gelu_new", __a=0.0, __a=0.0, __a=0.0, __a=1E-5, __a=0.02, __a=True, __a=5_0256, __a=5_0256, __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Dict = n_positions
_lowerCAmelCase : Any = n_embd
_lowerCAmelCase : Optional[Any] = n_layer
_lowerCAmelCase : List[str] = n_head
_lowerCAmelCase : Tuple = n_inner
_lowerCAmelCase : Union[str, Any] = rotary_dim
_lowerCAmelCase : Optional[int] = activation_function
_lowerCAmelCase : List[str] = resid_pdrop
_lowerCAmelCase : int = embd_pdrop
_lowerCAmelCase : int = attn_pdrop
_lowerCAmelCase : Tuple = layer_norm_epsilon
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : str = bos_token_id
_lowerCAmelCase : List[Any] = eos_token_id
super().__init__(
bos_token_id=__a, eos_token_id=__a, tie_word_embeddings=__a, **__a)
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a = "default", __a = None, __a = False, ):
'''simple docstring'''
super().__init__(__a, task=__a, patching_specs=__a, use_past=__a)
if not getattr(self._config, "pad_token_id", __a):
# TODO: how to do that better?
_lowerCAmelCase : Optional[int] = 0
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(__a, direction="inputs")
_lowerCAmelCase : Tuple = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCAmelCase : str = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return self._config.n_layer
@property
def snake_case__ ( self):
'''simple docstring'''
return self._config.n_head
def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = super(__a, self).generate_dummy_inputs(
__a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : Any = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Tuple = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Optional[int] = seqlen + 2
_lowerCAmelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : Optional[int] = [
(torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers)
]
_lowerCAmelCase : Any = common_inputs["attention_mask"]
if self.use_past:
_lowerCAmelCase : Tuple = ordered_inputs["attention_mask"].dtype
_lowerCAmelCase : Any = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__a, __a, dtype=__a)], dim=1)
return ordered_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return 13
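
A hedged instantiation sketch for the config above (GPTJConfig is transformers' class for model_type "gptj"; the small sizes are illustrative):

from transformers import GPTJConfig

config = GPTJConfig(n_layer=4, n_embd=512, n_head=8, rotary_dim=32)
print(config.model_type)  # "gptj"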
| 500 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
UpperCamelCase_ : Tuple = BioGptTokenizer
UpperCamelCase_ : Dict = False
def A_ ( self ) -> int:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_UpperCamelCase = dict(zip(a , range(len(a ) ) ) )
_UpperCamelCase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(a ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(a ) )
def A_ ( self , a ) -> int:
'''simple docstring'''
_UpperCamelCase = """lower newer"""
_UpperCamelCase = """lower newer"""
return input_text, output_text
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer(self.vocab_file , self.merges_file )
_UpperCamelCase = """lower"""
_UpperCamelCase = ["""low""", """er</w>"""]
_UpperCamelCase = tokenizer.tokenize(a )
self.assertListEqual(a , a )
_UpperCamelCase = tokens + ["""<unk>"""]
_UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@slow
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
_UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
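
Note on the fixture above (my own addition): the toy vocabulary encodes a two-step BPE merge. "lower" is split into characters, the learned merges "l o", "lo w" and "e r</w>" reduce it to ["low", "er</w>"], and those tokens map to ids 14 and 15 in the vocab (with "<unk>" at 20), which is exactly what the first test asserts.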
| 719 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : Dict = ["image_processor", "tokenizer"]
UpperCamelCase_ : Union[str, Any] = "ViltImageProcessor"
UpperCamelCase_ : List[str] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , a=None , a=None , **a ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a , )
_UpperCamelCase = kwargs.pop("""feature_extractor""" )
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a , a )
_UpperCamelCase = self.image_processor
def __call__( self , a , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = None , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchEncoding:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel_values + pixel_mask
_UpperCamelCase = self.image_processor(a , return_tensors=a )
encoding.update(a )
return encoding
def A_ ( self , *a , **a ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self , *a , **a ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.model_input_names
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self ) -> Tuple:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a , )
return self.image_processor_class
@property
def A_ ( self ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , a , )
return self.image_processor
| 202 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = 'mctct'
def __init__( self , a__=8065 , a__=1536 , a__=36 , a__=6144 , a__=4 , a__=384 , a__=920 , a__=1e-5 , a__=0.3 , a__="relu" , a__=0.02 , a__=0.3 , a__=0.3 , a__=1 , a__=0 , a__=2 , a__=1 , a__=0.3 , a__=1 , a__=(7,) , a__=(3,) , a__=80 , a__=1 , a__=None , a__="sum" , a__=False , **a__ , ) -> Optional[Any]:
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = intermediate_size
A = num_attention_heads
A = attention_head_dim
A = max_position_embeddings
A = layer_norm_eps
A = layerdrop
A = hidden_act
A = initializer_range
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = pad_token_id
A = bos_token_id
A = eos_token_id
A = conv_glu_dim
A = conv_dropout
A = num_conv_layers
A = input_feat_per_channel
A = input_channels
A = conv_channels
A = ctc_loss_reduction
A = ctc_zero_infinity
# prevents config testing fail with exporting to json
A = list(a__ )
A = list(a__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.' )
| 641 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Find the Jaccard similarity between two sets: |intersection| / |union|.
    With `alternative_union`, the denominator is |set_a| + |set_b| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # build the union as a list, keeping the order of elements from set_a
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
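
Worked check for the sets above: the intersection is {"c", "d", "e"} (size 3) and the union has 8 distinct elements, so jaccard_similarity(set_a, set_b) returns 3 / 8 = 0.375.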
| 641 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = torch.nn.Linear(10 , 10 )
_UpperCAmelCase : int = torch.optim.SGD(model.parameters() , 0.1 )
_UpperCAmelCase : Optional[Any] = Accelerator()
_UpperCAmelCase : Optional[Any] = accelerator.prepare(_lowercase )
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 707 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = os.path.join(args.tf_model_dir , "parameters.json" )
_UpperCAmelCase : List[str] = json.loads(open(lowerCAmelCase ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(".pt" ):
_UpperCAmelCase : Union[str, Any] = args.output + ".pt"
_UpperCAmelCase : Dict = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_UpperCAmelCase : int = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_UpperCAmelCase : Any = 8
_UpperCAmelCase : str = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_UpperCAmelCase : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : str = torch.tensor(lowerCAmelCase )
elif key_name.startswith("model/moe" ):
_UpperCAmelCase : List[str] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_UpperCAmelCase : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_UpperCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : Tuple = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/softmlp/kernel" ):
_UpperCAmelCase : List[str] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_UpperCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : Union[str, Any] = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_UpperCAmelCase : str = key_name[-9:-7]
for i in range(16 ):
_UpperCAmelCase : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_UpperCAmelCase : Optional[int] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_UpperCAmelCase : Dict = torch.tensor(lowerCAmelCase )
elif key_name.startswith("model/mlp" ):
_UpperCAmelCase : Tuple = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_UpperCAmelCase : Any = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_UpperCAmelCase : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/p1/bias" ):
_UpperCAmelCase : Optional[Any] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_UpperCAmelCase : Tuple = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : Any = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/p2/kernel" ):
_UpperCAmelCase : str = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_UpperCAmelCase : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : List[Any] = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/p2/bias" ):
_UpperCAmelCase : int = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_UpperCAmelCase : Optional[int] = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : str = torch.tensor(lowerCAmelCase )
elif key_name.startswith("model/ln" ):
_UpperCAmelCase : Union[str, Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_UpperCAmelCase : Union[str, Any] = "model.blocks.%d.feed_forward.norm.bias" % player
_UpperCAmelCase : int = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : Union[str, Any] = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/g" ):
_UpperCAmelCase : Optional[Any] = "model.blocks.%d.feed_forward.norm.weight" % player
_UpperCAmelCase : Union[str, Any] = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : List[Any] = torch.tensor(lowerCAmelCase )
elif key_name.startswith("model/att" ):
_UpperCAmelCase : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_UpperCAmelCase : List[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_UpperCAmelCase : Dict = state[:, 0, :, :]
_UpperCAmelCase : List[Any] = state[:, 1, :, :]
_UpperCAmelCase : Tuple = state[:, 2, :, :]
_UpperCAmelCase : List[str] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : int = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : int = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_UpperCAmelCase : Any = torch.tensor(lowerCAmelCase )
_UpperCAmelCase : List[Any] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_UpperCAmelCase : Any = torch.tensor(lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_UpperCAmelCase : str = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/o/kernel" ):
_UpperCAmelCase : Dict = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_UpperCAmelCase : Tuple = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase )
elif key_name.startswith("model/an" ):
_UpperCAmelCase : Optional[int] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_UpperCAmelCase : Any = "model.blocks.%d.self_attn.norm.bias" % player
_UpperCAmelCase : Any = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : Dict = torch.tensor(lowerCAmelCase )
elif key_name.endswith("/g" ):
_UpperCAmelCase : Dict = "model.blocks.%d.self_attn.norm.weight" % player
_UpperCAmelCase : Any = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : Tuple = torch.tensor(lowerCAmelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_UpperCAmelCase : Optional[Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_UpperCAmelCase : Dict = "model.%s.weight" % nlayer
_UpperCAmelCase : Optional[Any] = vnp.copy() # same in embedded
_UpperCAmelCase : List[Any] = torch.tensor(lowerCAmelCase )
if key_name.startswith("model/wte" ):
_UpperCAmelCase : List[str] = "lm_head.weight"
_UpperCAmelCase : Optional[int] = vnp.copy() # same in embedded
_UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase )
elif key_name.startswith("model/wob" ):
_UpperCAmelCase : Dict = "final_logits_bias"
_UpperCAmelCase : Optional[Any] = vnp.copy() # same in embedded
_UpperCAmelCase : str = state.reshape((1, -1) )
_UpperCAmelCase : Dict = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense/kernel":
_UpperCAmelCase : List[Any] = "model.last_project.weight"
_UpperCAmelCase : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCAmelCase : str = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense_1/bias":
_UpperCAmelCase : List[Any] = "model.last_project.bias"
_UpperCAmelCase : Any = vnp.copy() # same because it is one dimensional
_UpperCAmelCase : Any = torch.tensor(lowerCAmelCase )
torch.save(lowerCAmelCase , args.output )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
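
Note on the repeated transpose([1, 0]) calls above (my own addition): TensorFlow/Mesh-TensorFlow dense kernels are stored as (in_features, out_features), whereas torch.nn.Linear weights are (out_features, in_features), so every 2-D kernel must be transposed before it is written into the PyTorch state dict.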
| 467 | 0 |
def binary_recursive(decimal: int) -> str:
    """
    Take a positive integer value and return its binary equivalent.
    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Take an integer value, raise ValueError for wrong inputs, call the function
    above and return the output with prefix "0b" or "-0b".
    >>> main("-19")
    '-0b10011'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 666 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = "git_vision_model"
def __init__( self : int , A__ : Union[str, Any]=7_68 , A__ : List[Any]=30_72 , A__ : Tuple=12 , A__ : Optional[Any]=12 , A__ : Optional[int]=3 , A__ : List[str]=2_24 , A__ : Dict=16 , A__ : int="quick_gelu" , A__ : Any=1E-5 , A__ : Tuple=0.0 , A__ : Optional[int]=0.02 , **A__ : List[str] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**A__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : str = intermediate_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = patch_size
snake_case_ : List[str] = image_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Any = attention_dropout
snake_case_ : Any = layer_norm_eps
snake_case_ : int = hidden_act
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , A__ : Union[str, os.PathLike] , **A__ : Optional[int] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A__ )
snake_case_ ,snake_case_ : Tuple = cls.get_config_dict(A__ , **A__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
snake_case_ : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(A__ , **A__ )
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = "git"
def __init__( self : Any , A__ : List[str]=None , A__ : List[str]=3_05_22 , A__ : Tuple=7_68 , A__ : Tuple=6 , A__ : str=12 , A__ : Any=30_72 , A__ : List[str]="gelu" , A__ : int=0.1 , A__ : Dict=0.1 , A__ : Any=10_24 , A__ : Optional[Any]=0.02 , A__ : Optional[Any]=1E-12 , A__ : Dict=0 , A__ : Any="absolute" , A__ : Tuple=True , A__ : Any=False , A__ : Tuple=1_01 , A__ : Tuple=1_02 , A__ : List[Any]=None , **A__ : List[str] , ) -> int:
'''simple docstring'''
super().__init__(bos_token_id=A__ , eos_token_id=A__ , pad_token_id=A__ , **A__ )
if vision_config is None:
snake_case_ : int = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
snake_case_ : str = GitVisionConfig(**A__ )
snake_case_ : int = vocab_size
snake_case_ : List[Any] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = initializer_range
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Any = position_embedding_type
snake_case_ : Union[str, Any] = use_cache
snake_case_ : str = tie_word_embeddings
snake_case_ : List[Any] = num_image_with_embedding
snake_case_ : Dict = bos_token_id
snake_case_ : int = eos_token_id
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
snake_case_ : Optional[int] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
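
A hedged usage sketch composing the nested configs above (the class names GitConfig / GitVisionConfig follow transformers' naming for model_type "git"; treat the mapping as an assumption):

from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(num_hidden_layers=3, hidden_size=192, num_attention_heads=3)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=2)
print(config.vision_config.hidden_size)  # 192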
| 666 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
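
# Hedged usage sketch (my addition): this class mirrors
# transformers.CLIPSegProcessor, so typical use would be:
#
#     from transformers import CLIPSegProcessor
#     from PIL import Image
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.open("scene.jpg")  # hypothetical local file
#     inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#     # inputs now carries input_ids, attention_mask and pixel_values.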
| 706 |
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary string representation.

    >>> decimal_to_binary(8)
    '0b1000'
    >>> decimal_to_binary(-8)
    '-0b1000'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
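
    # Hedged cross-check (my addition): the output should agree with the
    # built-in bin() for any integer input.
    for value in (0, 5, -5, 200):
        assert decimal_to_binary(value) == bin(value)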
| 334 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 277 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 277 | 1 |
def sum_digits(num: int) -> int:
    """Returns the sum of the base-10 digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Returns the digit sum of the numerator of the max_n-th convergent of the
    continued fraction for e (Project Euler problem 65).
    """
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'''{solution() = }''')
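
    # Hedged sanity check (my addition): the problem statement says the 10th
    # convergent of e is 1457/536, and the digit sum of 1457 is 17.
    assert solution(10) == 17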
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system matrix * x = vector by Gaussian elimination with
    partial pivoting, returning the solution x as a column matrix.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Given data points (1, y_1), (2, y_2), ..., return the polynomial of
    minimal degree passing through all of them, as a callable.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) given in Project Euler problem 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Find the sum of the FITs (first incorrect terms) of the BOPs
    (best polynomial approximations) of the given generating function.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
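
    # Hedged cross-check (my addition): for the cubic u(n) = n**3 the problem
    # statement gives FITs 1, 15 and 58, i.e. a total of 74.
    assert solution(lambda n: n**3, 3) == 74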
| 16 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 400 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
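

# Hedged usage sketch (my addition): with nltk installed, the splitter turns a
# paragraph into one sentence per line, which is what rougeLsum expects.
if NLTK_AVAILABLE:
    text = "Pegasus is mighty. He flies over Greece."
    assert add_newline_to_end_of_each_sentence(text) == "Pegasus is mighty.\nHe flies over Greece."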
| 400 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 717 |
"""simple docstring"""
import math
lowerCAmelCase__ =10
lowerCAmelCase__ =7
lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS
def _a ( UpperCAmelCase__ = 20 ) -> str:
__SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total)
return f"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
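
    # Hedged Monte Carlo cross-check (my addition): drawing 20 of the 70 balls
    # repeatedly should average close to the analytic value printed above.
    import random

    def simulate(trials: int = 10000) -> float:
        balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
        return sum(len(set(random.sample(balls, 20))) for _ in range(trials)) / trials

    print(f"simulated: {simulate():.3f}")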
| 690 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 62 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on

        # The big literal above keeps its original (auto-generated) name.
        expected_encoding = _UpperCAmelCase

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 657 | 0 |
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 363 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
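

# Hedged usage sketch (my addition): the encoder input width is derived from
# the lags and the extra features, which is easy to verify on a default config.
if __name__ == "__main__":
    config = TimeSeriesTransformerConfig(prediction_length=24)
    expected = config.input_size * len(config.lags_sequence) + config._number_of_features
    assert config.feature_size == expected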
| 363 | 1 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: returns True if pattern occurs in text,
    in O(n + m) time, by precomputing a failure array for the pattern.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison:
    failure[k] is the length of the longest proper prefix of pattern[: k + 1]
    that is also a suffix of it.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 38 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
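
    # Hedged spot check (my addition): 585 is the classic double-base
    # palindrome from the problem statement (585 = 0b1001001001).
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])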
| 681 | 0 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at (row, column) given the
    current state of the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Builds the state space tree via backtracking: every complete placement is
    appended to the global solution list and printed.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        # for every row, check each column for a feasible queen placement
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints a board that holds a successful combination."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
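
# Hedged cross-check (my addition): the known solution counts for
# n = 1..8 are 1, 0, 0, 2, 10, 4, 40, 92, so the run above should find 92.
assert len(solution) == 92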
| 715 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """
    Multi-level feedback queue scheduler: every queue except the last runs
    round robin with its own time slice; unfinished processes fall through,
    and the last queue runs first-come, first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print the total waiting times of the processes (P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print the completion times of the processes (P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print the total turnaround times of the processes (P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print the sequence of finished processes
    print(
        f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
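    # A minimal extra sketch (assumption: Process exposes a `process_name`
    # attribute, as used by calculate_sequence_of_finish_queue above):
    # a two-queue MLFQ with a single 10-tick round-robin pass before FCFS.
    PA = Process("PA", 0, 25)
    PB = Process("PB", 0, 4)
    demo_mlfq = MLFQ(2, [10], deque([PA, PB]), 0)
    print([p.process_name for p in demo_mlfq.multi_level_feedback_queue()])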
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name: str) -> DetaConfig:
    """Build the DETA configuration that matches the checkpoint name."""
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
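# The id2label/label2id wiring above, in isolation on a toy mapping
# (illustrative labels only, unrelated to COCO/Objects365):
_demo_id2label = {0: "cat", 1: "dog"}
_demo_label2id = {v: k for k, v in _demo_id2label.items()}
assert _demo_label2id["dog"] == 1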
def create_rename_keys(config: DetaConfig) -> list:
    """List (source, destination) weight-name pairs for the checkpoint conversion."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct: dict, old: str, new: str) -> None:
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val
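# Quick sanity check for rename_key on a toy dict (hypothetical key names):
_demo_sd = {"backbone.norm.bias": 0}
rename_key(_demo_sd, "backbone.norm.bias", "model.backbone.norm.bias")
assert "model.backbone.norm.bias" in _demo_sd and "backbone.norm.bias" not in _demo_sd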
def read_in_swin_q_k_v(state_dict: dict, backbone_config: SwinConfig) -> None:
    """Split the fused Swin qkv projections into separate query/key/value tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of the input projection layer (in the original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
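# The slicing above, shown in isolation: a fused (3*dim, dim) qkv projection
# splits into three (dim, dim) blocks, query first, then key, then value.
_demo_dim = 4
_fused = torch.arange(3 * _demo_dim * _demo_dim, dtype=torch.float32).reshape(3 * _demo_dim, _demo_dim)
_q, _k, _v = _fused[:_demo_dim], _fused[_demo_dim : 2 * _demo_dim], _fused[-_demo_dim:]
assert torch.equal(torch.cat([_q, _k, _v]), _fused)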
def read_in_decoder_q_k_v(state_dict: dict, config: DetaConfig) -> None:
    """Split the fused decoder self-attention projections into query/key/value tensors."""
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of the input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Download the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name: str, pytorch_dump_folder_path: str, push_to_hub: bool) -> None:
    """Copy/paste/tweak the original DETA checkpoint into the Transformers structure."""
    config = get_deta_config(model_name)

    # load the original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create the HuggingFace model and load the state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load the image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify the conversion on an image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # save the model and the processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
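# Typical invocation (the script file name and output path here are
# illustrative, not taken from the repository):
#
#     python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#         --pytorch_dump_folder_path /tmp/deta --push_to_hub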
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # required parameters
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)


import torch
def main() -> None:
    """Report how many CUDA devices torch can see."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
UpperCamelCase__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
from math import ceil, sqrt

def solution(limit: int = 1_000_000) -> int:
    """
    Count the hollow square laminae (square frames at least one tile thick)
    that can be formed with up to `limit` tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"""{solution() = }""")
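# A brute-force cross-check of the counting logic on a small limit (same
# definition as solution(): outer width n, hole width m of the same parity,
# frame at least one tile thick, n*n - m*m tiles used):
def _brute_force(limit: int) -> int:
    count = 0
    for n in range(3, limit):
        for m in range(n - 2, 0, -2):
            if n * n - m * m > limit:
                break
            count += 1
    return count


assert _brute_force(1_000) == solution(1_000)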
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if `n` uses each digit 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """
    Find the largest 1-9 pandigital number formed as the concatenated
    product of an integer with (1, 2, ..., n) for some n > 1.
    """
    # a 4-digit base b with (1, 2): concat(b, 2b) == b * 100_002, since 2b has 5 digits
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # a 3-digit base b with (1, 2, 3): concat(b, 2b, 3b) == b * 1_002_003
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
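# Worked example of the concatenated-product idea above:
# 192 * (1, 2, 3) -> "192" + "384" + "576" == "192384576", which is pandigital,
# and indeed 192 * 1_002_003 == 192384576.
assert is_9_pandigital(192 * 1_002_003)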
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create a universe of discourse using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using the inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition
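    # The two composition notes above carry no code in the original; this is a
    # minimal numpy sketch of both for small relation matrices R (X -> Y) and
    # S (Y -> Z), where T(x, z) = max over y of min(R(x, y), S(y, z)).
    R = np.array([[0.2, 0.8], [0.6, 0.4]])
    S = np.array([[0.5, 0.9], [0.7, 0.3]])
    max_min_composition = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    max_product_composition = np.max(R[:, :, None] * S[None, :, :], axis=1)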
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
"""An integer/float matrix class with determinant, inverse, and arithmetic."""
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
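# Small usage sketch for the class above (values follow from the definitions):
#
#     m = Matrix([[1, 2], [3, 4]])
#     assert m.determinant() == -2          # 1*4 - 2*3
#     assert (m ** 0) == m.identity()       # zeroth power is the identity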
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Convert a fairseq Speech2Text checkpoint to the Transformers format."""
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
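# Tiny check of the embedding -> linear tying above (hypothetical sizes):
# the returned lm head shares its storage with the embedding matrix.
_demo_emb = nn.Embedding(10, 4)
_demo_lm_head = make_linear_from_emb(_demo_emb)
assert _demo_lm_head.weight.data_ptr() == _demo_emb.weight.data_ptr()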
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_config(self) -> RealmConfig:
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self) -> Dataset:
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self) -> np.ndarray:
        block_records = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
            ],
            dtype=object,
        )
return block_records
    def get_dummy_retriever(self) -> RealmRetriever:
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = """yolos"""
def __init__( self : Optional[int] , _lowercase : int=768 , _lowercase : Dict=12 , _lowercase : Optional[int]=12 , _lowercase : Tuple=3072 , _lowercase : Union[str, Any]="gelu" , _lowercase : List[str]=0.0 , _lowercase : Tuple=0.0 , _lowercase : Dict=0.02 , _lowercase : Union[str, Any]=1E-12 , _lowercase : Optional[int]=[512, 864] , _lowercase : List[Any]=16 , _lowercase : Optional[int]=3 , _lowercase : str=True , _lowercase : str=100 , _lowercase : Union[str, Any]=True , _lowercase : Optional[Any]=False , _lowercase : Optional[Any]=1 , _lowercase : List[Any]=5 , _lowercase : List[str]=2 , _lowercase : str=5 , _lowercase : str=2 , _lowercase : List[str]=0.1 , **_lowercase : Dict , ) -> List[Any]:
super().__init__(**A_ )
snake_case : List[str] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : Any = hidden_act
snake_case : Optional[int] = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : Tuple = initializer_range
snake_case : List[Any] = layer_norm_eps
snake_case : int = image_size
snake_case : Optional[Any] = patch_size
snake_case : Tuple = num_channels
snake_case : str = qkv_bias
snake_case : int = num_detection_tokens
snake_case : Dict = use_mid_position_embeddings
snake_case : List[Any] = auxiliary_loss
# Hungarian matcher
snake_case : List[str] = class_cost
snake_case : str = bbox_cost
snake_case : Any = giou_cost
# Loss coefficients
snake_case : List[Any] = bbox_loss_coefficient
snake_case : str = giou_loss_coefficient
snake_case : Optional[int] = eos_coefficient
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = version.parse("""1.11""")
@property
def __lowercase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __lowercase ( self : Optional[int] ) -> float:
return 1E-4
@property
def __lowercase ( self : Optional[int] ) -> int:
return 12
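# Usage sketch (added for illustration; it only touches the classes defined in
# this module, and assumes the plain OnnxConfig(config) constructor):
#   config = YolosConfig(num_detection_tokens=100)
#   onnx_config = YolosOnnxConfig(config)
#   onnx_config.inputs              # OrderedDict mapping "pixel_values" to its dynamic axes
#   onnx_config.default_onnx_opset  # 12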
| 449 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool ):
    default_checkpoint = '''microsoft/speecht5_tts'''
    description = (
        '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
        '''text to read (in English) and returns a waveform object containing the sound.'''
    )
    name = '''text_reader'''
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ['''text''']
    outputs = ['''audio''']
    def setup( self ):
        """simple docstring"""
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()

    def encode( self , text , speaker_embeddings=None ):
        """simple docstring"""
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model.generate_speech(**inputs )

    def decode( self , outputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
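# Usage sketch (added for illustration; calling the tool downloads the
# checkpoints named above, so it is left as a comment):
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, world!")  # PipelineTool.__call__ chains encode -> forward -> decode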
| 353 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''
    module: nn.Module
    traced: list = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook(self , m , inputs , outputs ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__(self , a_ ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized(self ):
        '''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    '''simple docstring'''
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list )
    dest_skip: list = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__(self , x: Tensor ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                f""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""" )
class FakeRegNetVisslWrapper(nn.Module ):
    '''simple docstring'''
    def __init__(self , model: nn.Module ):
        '''simple docstring'''
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('''conv1''', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('''block''' ), f"""Unexpected layer name {k}"""
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((f"""res{block_index}""", v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )

    def forward(self , x: Tensor ):
        '''simple docstring'''
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict ):
    '''simple docstring'''
    def convert_name_to_timm(self , x: str ) -> str:
        '''simple docstring'''
        x_split = x.split('''-''' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )

    def __getitem__(self , x: str ):
        '''simple docstring'''
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap(dict ):
    '''simple docstring'''
    def __getitem__(self , x: str ):
        '''simple docstring'''
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys: List[Tuple[str, str]] ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""" )
    return to_state_dict
def convert_weight_and_push( name: str , from_model_func: Callable[[], nn.Module] , our_model_func: Callable[[], nn.Module] , config: RegNetConfig , save_directory: Path , push_to_hub: bool = True , ):
    """simple docstring"""
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=True , )
        size = 224 if '''seer''' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(f"""Pushed {name}""" )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    """simple docstring"""
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str , model_func: Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='''cpu''' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['''classy_state_dict''']['''base_model''']['''model''']
        state_dict = model_state_dict['''trunk''']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['''regnet-y-320-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['''regnet-y-640-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['''regnet-y-1280-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['''regnet-y-10b-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map['''regnet-y-320-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['''regnet-y-640-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['''regnet-y-1280-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['''regnet-y-10b-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
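# Example invocation (illustrative; `<this_script>.py` stands for whatever name
# this file is saved under, and the dump path is a placeholder; the flags are
# the ones declared by the argument parser above):
#   python <this_script>.py --model_name regnet-y-320-seer --pytorch_dump_folder_path ./regnet_dump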
| 705 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 229 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin ):
    '''simple docstring'''
    tokenizer_class = '''AutoTokenizer'''
    attributes = ['''tokenizer''']
    preset_shape = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
def A__ ( self , snake_case_ , snake_case_="speaker_embeddings_path.json" , snake_case_="speaker_embeddings" , snake_case_ = False , **snake_case_ , ) -> Dict:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(snake_case_ , snake_case_ , """v2""" ) , exist_ok=snake_case_ )
__lowerCAmelCase = {}
__lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__lowerCAmelCase = self._load_voice_preset(snake_case_ )
__lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , snake_case_ , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=snake_case_ , )
__lowerCAmelCase = os.path.join(snake_case_ , f"""{prompt_key}_{key}.npy""" )
__lowerCAmelCase = tmp_dict
with open(os.path.join(snake_case_ , snake_case_ ) , """w""" ) as fp:
json.dump(snake_case_ , snake_case_ )
super().save_pretrained(snake_case_ , snake_case_ , **snake_case_ )
    def _load_voice_preset( self , voice_preset = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
            path = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset = None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_="pt" , snake_case_=256 , snake_case_=False , snake_case_=True , snake_case_=False , **snake_case_ , ) -> str:
if voice_preset is not None and not isinstance(snake_case_ , snake_case_ ):
if (
isinstance(snake_case_ , snake_case_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__lowerCAmelCase = self._load_voice_preset(snake_case_ )
else:
if isinstance(snake_case_ , snake_case_ ) and not voice_preset.endswith(""".npz""" ):
__lowerCAmelCase = voice_preset + """.npz"""
__lowerCAmelCase = np.load(snake_case_ )
if voice_preset is not None:
self._validate_voice_preset_dict(snake_case_ , **snake_case_ )
__lowerCAmelCase = BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
__lowerCAmelCase = self.tokenizer(
snake_case_ , return_tensors=snake_case_ , padding="""max_length""" , max_length=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , add_special_tokens=snake_case_ , **snake_case_ , )
if voice_preset is not None:
__lowerCAmelCase = voice_preset
return encoded_text
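# Usage sketch (added for illustration; assumes the "suno/bark" checkpoint and
# its bundled speaker embeddings are reachable on the Hub):
#   processor = BarkProcessor.from_pretrained("suno/bark")
#   encoded = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `encoded` carries the tokenized text plus a "history_prompt" BatchFeature.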
| 465 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    TEXT = 1

@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    '''simple docstring'''
    return_name = '''generated'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        if self.framework == "pt":
            in_b, input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    '''simple docstring'''
    return_name = '''summary'''
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def check_inputs( self , input_length: int , min_length: int , max_length: int ) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                """a summarization task, where outputs shorter than the input are typically wanted, you might """
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    '''simple docstring'''
    return_name = '''translation'''
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                """increasing your max_length manually, e.g. translator('...', max_length=400)""" )
        return True
    def preprocess( self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        if getattr(self.tokenizer , """_build_translation_inputs""" , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters( self , src_lang=None , tgt_lang=None , **kwargs ):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""" , self.task )
            items = task.split("""_""" )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
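# Usage sketch (added for illustration; both calls download models, so it is
# left as a comment):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")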
| 465 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1
def casimir_force( force: float , area: float , distance: float ) -> dict[str, float]:
    '''simple docstring'''
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if force < 0:
        raise ValueError("""Magnitude of force can not be negative""" )
    if distance < 0:
        raise ValueError("""Distance can not be negative""" )
    if area < 0:
        raise ValueError("""Area can not be negative""" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 571 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = GLPNImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size_divisor" ) )
        self.assertTrue(hasattr(image_processing , "resample" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 371 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ) -> bool:
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command( dataset_loading_script_dir ):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir , """README.md""" )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_35_15_63,
"""num_examples""": 1_00_00,
},
{
"""name""": """validation""",
"""num_bytes""": 23_84_18,
"""num_examples""": 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 543 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property ):
"""simple docstring"""
    def __get__( self , obj , objtype=None ):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool( val ):
    """simple docstring"""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def is_tensor( _A ):
"""simple docstring"""
if is_torch_fx_proxy(_A ):
return True
if is_torch_available():
import torch
if isinstance(_A , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_A , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_A , (jnp.ndarray, Tracer) ):
return True
return isinstance(_A , np.ndarray )
def _is_numpy( _A ):
"""simple docstring"""
return isinstance(_A , np.ndarray )
def is_numpy_array( _A ):
"""simple docstring"""
return _is_numpy(_A )
def _is_torch( _A ):
"""simple docstring"""
import torch
return isinstance(_A , torch.Tensor )
def is_torch_tensor( _A ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(_A )
def _is_torch_device( _A ):
"""simple docstring"""
import torch
return isinstance(_A , torch.device )
def is_torch_device( _A ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(_A )
def _is_torch_dtype( _A ):
    """simple docstring"""
    import torch
    if isinstance(_A , str ):
        if hasattr(torch , _A ):
            _A = getattr(torch , _A )
        else:
            return False
    return isinstance(_A , torch.dtype )
def is_torch_dtype( _A ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(_A )
def _is_tensorflow( _A ):
"""simple docstring"""
import tensorflow as tf
return isinstance(_A , tf.Tensor )
def is_tf_tensor( _A ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(_A )
def _is_tf_symbolic_tensor( _A ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_A , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(_A )
return type(_A ) == tf.Tensor
def is_tf_symbolic_tensor( _A ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(_A )
def _is_jax( _A ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(_A , jnp.ndarray )
def is_jax_tensor( _A ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(_A )
def to_py_obj( _A ):
"""simple docstring"""
if isinstance(_A , (dict, UserDict) ):
return {k: to_py_obj(_A ) for k, v in obj.items()}
elif isinstance(_A , (list, tuple) ):
return [to_py_obj(_A ) for o in obj]
elif is_tf_tensor(_A ):
return obj.numpy().tolist()
elif is_torch_tensor(_A ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_A ):
return np.asarray(_A ).tolist()
elif isinstance(_A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def to_numpy( _A ):
"""simple docstring"""
if isinstance(_A , (dict, UserDict) ):
return {k: to_numpy(_A ) for k, v in obj.items()}
elif isinstance(_A , (list, tuple) ):
return np.array(_A )
elif is_tf_tensor(_A ):
return obj.numpy()
elif is_torch_tensor(_A ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_A ):
return np.asarray(_A )
else:
return obj
class ModelOutput(OrderedDict ):
"""simple docstring"""
    def __post_init__( self ):
        '''simple docstring'''
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"{self.__class__.__name__} has no fields." )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        '''simple docstring'''
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )

    def setdefault( self , *args , **kwargs ):
        '''simple docstring'''
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )

    def pop( self , *args , **kwargs ):
        '''simple docstring'''
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )

    def update( self , *args , **kwargs ):
        '''simple docstring'''
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
    def __getitem__( self , k ):
        '''simple docstring'''
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )

    def __setitem__( self , key , value ):
        '''simple docstring'''
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
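# Usage sketch (added for illustration): subclasses are dataclasses whose set
# fields are reachable by attribute, key, or index; unset (None) fields are
# skipped. `dataclass` would need to be imported for this to run.
#
#   @dataclass
#   class DemoOutput(ModelOutput):
#       loss: Any = None
#       logits: Any = None
#
#   out = DemoOutput(logits=np.ones(2))
#   out.logits is out["logits"]  # True
#   out.to_tuple()               # (array([1., 1.]),) -- loss is None, so dropped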
class ExplicitEnum(str , Enum ):
    """simple docstring"""
    @classmethod
    def _missing_( cls , value ):
        '''simple docstring'''
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}" )

class PaddingStrategy(ExplicitEnum ):
    """simple docstring"""
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"

class TensorType(ExplicitEnum ):
    """simple docstring"""
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
"""simple docstring"""
    def __init__( self , context_managers: List[ContextManager] ):
        '''simple docstring'''
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ):
        '''simple docstring'''
        self.stack.__exit__(*args , **kwargs )
def can_return_loss( model_class ):
    """simple docstring"""
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call ) # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward ) # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ ) # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ):
    """simple docstring"""
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call ) # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward ) # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ ) # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d: MutableMapping , parent_key: str = "" , delimiter: str = "." ):
    """simple docstring"""
    def _flatten_dict( d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
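# Usage sketch (added for illustration):
#   flatten_dict({"a": 1, "b": {"c": 2}})         # -> {"a": 1, "b.c": 2}
#   flatten_dict({"b": {"c": 2}}, delimiter="_")  # -> {"b_c": 2}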
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir: bool = False ):
    """simple docstring"""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"Type not supported for transpose: {type(array )}." )
def reshape( array , newshape ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"Type not supported for reshape: {type(array )}." )
def squeeze( array , axis=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array )}." )
def expand_dims( array , axis ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array )}." )
def tensor_size( array ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(array )}." )
def add_model_info_to_auto_map( auto_map , repo_id ):
    """simple docstring"""
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def infer_framework( model_class ):
    """simple docstring"""
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"Could not infer framework from class {model_class}." )
| 701 |
def binary_xor( a: int , b: int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:] # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
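# Usage sketch (added for illustration):
#   binary_xor(25, 32)  # -> "0b111001"
#   binary_xor(37, 50)  # -> "0b010111"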
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale
    image = imread(file_path, 0)
    # Test that get_neighbors_pixel() returns a value
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test local_binary_value(): create a numpy array the same height and width
    # as the read image, then compute the local binary pattern value per pixel
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
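# These checks are intended to be collected by pytest, e.g.:
#     python -m pytest digital_image_processing/test_digital_image_processing.py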
| 480 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """Builds a `Dataset` from one or several plain-text files."""

    def __init__(
        self, path_or_paths, split=None, features=None, cache_dir=None,
        keep_in_memory=False, streaming=False, num_proc=None, **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
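# Usage sketch (hypothetical file path):
#     ds = TextDatasetReader("my_corpus.txt", split=NamedSplit("train")).read()
#     print(ds[0]["text"])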
| 480 | 1 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
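# Quick check of both generators:
#     generate_pascal_triangle(4)            # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#     generate_pascal_triangle_optimized(4)  # same rows, built via half-row symmetry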
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 67 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable, standard pinhole cameras."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to [-1, 1], then scale by the view frustum
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
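# Usage sketch: 20 cameras panning around the origin, each at 64x64 resolution.
# Each ray is an (origin, direction) pair, hence the trailing (2, 3) dimensions:
#     cameras = create_pan_cameras(64)
#     cameras.camera_rays.shape  # torch.Size([1, 20 * 64 * 64, 2, 3])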
| 67 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
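# With this lazy structure, importing the package stays cheap: the torch-backed
# classes listed above (e.g. FNetModel) are only imported on first attribute access.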
| 78 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512,
        enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6,
        dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
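# Usage sketch:
#     config = BertAbsConfig()
#     (config.enc_layers, config.dec_hidden_size)  # (6, 768)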
| 78 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Builds a `Dataset` from one or several JSON/JSON-lines files."""

    def __init__(
        self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False,
        streaming=False, field=None, num_proc=None, **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Writes a `Dataset` to a JSON-lines file or binary buffer, batch by batch."""

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON to a binary file handle; returns the number of bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
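# Usage sketch (hypothetical paths):
#     ds = JsonDatasetReader("records.jsonl", split=NamedSplit("train")).read()
#     JsonDatasetWriter(ds, "out.jsonl", lines=True).write()  # returns bytes written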
| 700 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
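# Usage sketch (downloads the facebook/bart-large-mnli checkpoint on first use):
#     classifier = TextClassificationTool()
#     classifier("This is a super nice API!", labels=["positive", "negative"])  # "positive"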
| 423 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
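# Semantics exercised above, in short: a stopping criterion returns True once
# generation should halt, e.g. MaxLengthCriteria(10) fires as soon as
# input_ids.shape[-1] reaches 10.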
| 99 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class snake_case__ ( __A ):
def A ( self ) -> Any:
"""simple docstring"""
a_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def A ( self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(UpperCamelCase_ ):
a_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def A ( self ) -> str:
"""simple docstring"""
with self.assertRaises(UpperCamelCase_ ):
a_ : int = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
a_ : List[str] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def A ( self ) -> int:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
a_ : Dict = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def A ( self ) -> List[str]:
"""simple docstring"""
a_ : str = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def A ( self ) -> str:
"""simple docstring"""
a_ : Dict = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def A ( self ) -> List[str]:
"""simple docstring"""
a_ : Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def A ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
a_ : Tuple = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def A ( self ) -> str:
"""simple docstring"""
a_ : Union[str, Any] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ : int = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def A ( self ) -> Any:
"""simple docstring"""
import PIL.Image
a_ : Optional[Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=UpperCamelCase_ ) as mock_cast_to_python_objects:
a_ : List[Any] = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
a_ : Any = pa.BufferReader(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , pa.Buffer ) else pa.memory_map(SCREAMING_SNAKE_CASE_ )
a_ : Dict = pa.ipc.open_stream(SCREAMING_SNAKE_CASE_ )
a_ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
a_ : Dict = pa.BufferOutputStream()
a_ : List[str] = pa.schema(SCREAMING_SNAKE_CASE_ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE_ , schema=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : Dict = pa.BufferOutputStream()
a_ : Dict = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
a_ : Any = pa.BufferReader(output.getvalue() )
a_ : List[str] = pa.ipc.open_stream(SCREAMING_SNAKE_CASE_ )
a_ : pa.Table = f.read_all()
a_ : Optional[int] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
a_ : Optional[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ , hash_salt="""split_name""" , check_duplicates=SCREAMING_SNAKE_CASE_ , ) as writer:
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
a_ : Tuple = pa.BufferOutputStream()
with ArrowWriter(
stream=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ , hash_salt="""split_name""" , check_duplicates=SCREAMING_SNAKE_CASE_ , ) as writer:
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
a_ : Optional[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ , hash_salt="""split_name""" , check_duplicates=SCREAMING_SNAKE_CASE_ , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
a_ : List[str] = pa.BufferOutputStream()
a_ : List[str] = pa.schema(SCREAMING_SNAKE_CASE_ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE_ , schema=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
a_ : str = pa.BufferOutputStream()
a_ : Optional[Any] = pa.schema(SCREAMING_SNAKE_CASE_ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE_ , schema=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
a_ : Union[str, Any] = pa.BufferOutputStream()
a_ : List[Any] = pa.schema(SCREAMING_SNAKE_CASE_ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE_ , schema=SCREAMING_SNAKE_CASE_ , writer_batch_size=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : List[str] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a_ : str = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
a_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , """test.arrow""" )
with ArrowWriter(path=SCREAMING_SNAKE_CASE_ , schema=pa.schema(SCREAMING_SNAKE_CASE_ ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
            num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE_ , metadata=writer._schema.metadata )
_check_output(SCREAMING_SNAKE_CASE_ , 1 )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if pa.types.is_list(SCREAMING_SNAKE_CASE_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
if isinstance(lst[0] , SCREAMING_SNAKE_CASE_ ):
change_first_primitive_element_in_list(lst[0] , SCREAMING_SNAKE_CASE_ )
else:
a_ : Dict = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
a_ : str = pa.array(TypedSequence(SCREAMING_SNAKE_CASE_ , optimized_int_type=SCREAMING_SNAKE_CASE_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
a_ : Optional[int] = pa.array(OptimizedTypedSequence(SCREAMING_SNAKE_CASE_ , col=SCREAMING_SNAKE_CASE_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
a_ : str = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
a_ : Any = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a_ : List[str] = pa.array(OptimizedTypedSequence(SCREAMING_SNAKE_CASE_ , col=SCREAMING_SNAKE_CASE_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
a_ : Optional[int] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=SCREAMING_SNAKE_CASE_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
a_ : Any = """mock://dataset-train.arrow"""
with ArrowWriter(path=SCREAMING_SNAKE_CASE_ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(SCREAMING_SNAKE_CASE_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : int = pa.BufferOutputStream()
with ParquetWriter(stream=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
a_ : List[str] = pa.BufferReader(output.getvalue() )
a_ : pa.Table = pq.read_table(SCREAMING_SNAKE_CASE_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
import PIL.Image
a_ : Optional[Any] = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(SCREAMING_SNAKE_CASE_ , format="""png""" )
a_ : Tuple = pa.BufferOutputStream()
with ParquetWriter(
stream=SCREAMING_SNAKE_CASE_ , features=Features({"""image""": Image()} ) , embed_local_files=SCREAMING_SNAKE_CASE_ ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
a_ : int = pa.BufferReader(output.getvalue() )
a_ : pa.Table = pq.read_table(SCREAMING_SNAKE_CASE_ )
a_ : str = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : int = pa.schema([pa.field("""col_1""" , pa.string() , nullable=SCREAMING_SNAKE_CASE_ )] )
a_ : Tuple = pa.BufferOutputStream()
with ArrowWriter(stream=SCREAMING_SNAKE_CASE_ ) as writer:
writer._build_writer(inferred_schema=SCREAMING_SNAKE_CASE_ )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 419 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """
    Expected number of distinct colours among `taken` balls drawn from NUM_BALLS
    balls (BALLS_PER_COLOUR of each of NUM_COLOURS colours). By linearity of
    expectation this is
    NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, taken) / C(NUM_BALLS, taken)).
    """
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
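# Sanity check of the formula: with taken=70 every ball is drawn, the
# missing-colour binomial C(60, 70) is 0, and the expectation is exactly 7 colours.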
| 384 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'

    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 384 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
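# The BLINKER fixture oscillates with period 2: new_generation(BLINKER) yields
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]], and applying it once more restores the original.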
| 490 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0,
        do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self, raw_speech, padding=False, max_length=None, truncation=False,
        pad_to_multiple_of=None, return_tensors=None, sampling_rate=None,
        return_attention_mask=None, **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
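# Usage sketch (assumes a mono float waveform sampled at 16 kHz):
#     fe = Speech2TextFeatureExtractor()
#     batch = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#     batch["input_features"].shape  # (1, num_frames, 80)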
| 490 | 1 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 1 unless both inputs are 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 43 |
'''simple docstring'''
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 43 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ):
'''simple docstring'''
if attention_mask is None:
A_ = np.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
A_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _lowercase ( __lowerCamelCase,unittest.TestCase ):
_lowercase : Any = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_lowercase : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_lowercase : Optional[int] = True
_lowercase : List[Any] = False
_lowercase : Tuple = False
_lowercase : Union[str, Any] = False
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = FlaxPegasusModelTester(self )
A_ = ConfigTester(self , config_class=lowerCamelCase__ )
def UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
A_ = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ : str , lowerCamelCase__ : List[Any]=None , **lowerCamelCase__ : List[Any] ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
A_ = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = model_class(lowerCamelCase__ )
A_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
A_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest('''JIT Enabled''' ):
A_ = decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ = decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=lowerCamelCase__ )
A_ = np.ones((1, 1) )
A_ = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@slow
def UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
A_ = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
A_ = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
A_ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
A_ = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
A_ = tokenizer(lowerCamelCase__ , return_tensors='''np''' , truncation=lowerCamelCase__ , max_length=5_1_2 , padding=lowerCamelCase__ )
A_ = model.generate(**lowerCamelCase__ , num_beams=2 ).sequences
A_ = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
assert tgt_text == decoded
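
# Self-contained sketch of the position-id bookkeeping the cache tests above
# exercise. It assumes only that jax is installed; no model weights are used.
import jax.numpy as jnp

batch_size, seq_len = 2, 7
decoder_input_ids = jnp.ones((batch_size, seq_len), dtype="i4")

# Positions for the "prefill" pass over all but the last token.
prefill_positions = jnp.broadcast_to(
    jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)

# Position for the single-token pass that extends the cache by one step.
next_position = jnp.array(batch_size * [[seq_len - 1]], dtype="i4")

assert prefill_positions.shape == (batch_size, seq_len - 1)
assert next_position.shape == (batch_size, 1)
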
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; the tail of the longer
    string is appended unchanged."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
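
# Quick self-checks for the interleaving behaviour (illustrative additions,
# not part of the original file):
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"
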
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
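
# Round-trip sanity check against the standard library (illustrative; assumes
# the two functions above are in scope):
import base64 as stdlib_base64

for sample in (b"Hello World!", b"\x00\xff\x10", b"a"):
    assert base64_encode(sample) == stdlib_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample
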
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''')
class A_ ( unittest.TestCase ):
_A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = 0
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig()
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(snake_case__ )
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write("""{}""" )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoProcessor.register(snake_case__ , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
class A_ ( __a ):
_A :List[str] = False
class A_ ( __a ):
_A :Dict = False
class A_ ( __a ):
_A :Union[str, Any] = '''AutoFeatureExtractor'''
_A :Tuple = '''AutoTokenizer'''
_A :Optional[Any] = False
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local classes.
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class A_ ( unittest.TestCase ):
_A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ):
lowercase = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , )
lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(snake_case__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f:
lowercase = json.load(snake_case__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
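
# Minimal save/reload round trip, the core invariant most tests above assert.
# This is a sketch: it assumes `transformers` is installed and the hub
# checkpoint is reachable over the network.
import tempfile

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
with tempfile.TemporaryDirectory() as tmp_dir:
    processor.save_pretrained(tmp_dir)
    reloaded = AutoProcessor.from_pretrained(tmp_dir)
assert type(reloaded) is type(processor)
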
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
snake_case = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
snake_case = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
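
# Usage sketch mirroring the docstring above. It downloads a COMET checkpoint
# on first run and uses the older `datasets.load_metric` API, so treat it as
# illustrative rather than a guaranteed-stable invocation.
import datasets

comet_metric = datasets.load_metric("comet")
source = ["Dem Feuer konnte Einhalt geboten werden"]
hypothesis = ["The fire could be stopped"]
reference = ["They were able to control the fire."]
results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
print(results["mean_score"])
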
"""simple docstring"""
def UpperCAmelCase ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not len(A ) == len(A ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
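
# Worked examples (illustrative additions):
#   0x + 4y = 50 and 2x + 0y = 26  ->  x = 13, y = 12.5
assert cramers_rule_2x2([0, 4, 50], [2, 0, 26]) == (13.0, 12.5)
# Zero right-hand sides with a nonzero determinant give only the trivial solution.
assert cramers_rule_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)
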
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a__ ( nn.Module ):
def __init__( self, _UpperCAmelCase = 16, _UpperCAmelCase = 88, _UpperCAmelCase = None, _UpperCAmelCase = 1, _UpperCAmelCase = 0.0, _UpperCAmelCase = 32, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = "geglu", _UpperCAmelCase = None, ):
'''simple docstring'''
super().__init__()
lowercase__ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_UpperCAmelCase, attention_head_dim=_UpperCAmelCase, in_channels=_UpperCAmelCase, num_layers=_UpperCAmelCase, dropout=_UpperCAmelCase, norm_num_groups=_UpperCAmelCase, cross_attention_dim=_UpperCAmelCase, attention_bias=_UpperCAmelCase, sample_size=_UpperCAmelCase, num_vector_embeds=_UpperCAmelCase, activation_fn=_UpperCAmelCase, num_embeds_ada_norm=_UpperCAmelCase, )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowercase__ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowercase__ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowercase__ = [1, 0]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase = True, ):
'''simple docstring'''
lowercase__ = hidden_states
lowercase__ = []
lowercase__ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowercase__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowercase__ = self.transformer_index_for_condition[i]
lowercase__ = self.transformers[transformer_index](
_UpperCAmelCase, encoder_hidden_states=_UpperCAmelCase, timestep=_UpperCAmelCase, cross_attention_kwargs=_UpperCAmelCase, return_dict=_UpperCAmelCase, )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowercase__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowercase__ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_UpperCAmelCase )
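
# Shape-level sketch of the dual-stream mixing above (assumes torch is
# installed; tensors stand in for the two transformers' residual outputs):
import torch

input_states = torch.randn(1, 4, 8)
encoded = [torch.randn_like(input_states) for _ in range(2)]
mix_ratio = 0.5
output = encoded[0] * mix_ratio + encoded[1] * (1 - mix_ratio) + input_states
assert output.shape == input_states.shape
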
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
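
# Sanity check (illustrative): for a Hermitian matrix, the Rayleigh quotient
# of an eigenvector recovers the corresponding eigenvalue.
import numpy as np

m = np.array([[2.0, 0.0], [0.0, 5.0]])
eigenvalues, eigenvectors = np.linalg.eigh(m)
v = eigenvectors[:, [0]]  # eigenvector for the smallest eigenvalue, as a column
assert np.isclose(rayleigh_quotient(m, v), eigenvalues[0])
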
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
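
# Illustrative only: the class above expects a DatasetInfo-like object with a
# `siblings` list; here a stub shows how `ls`/`info` read the directory cache.
from types import SimpleNamespace

stub_repo_info = SimpleNamespace(
    id="user/dataset",
    sha="main",
    siblings=[
        SimpleNamespace(rfilename="data/train.csv"),
        SimpleNamespace(rfilename="README.md"),
    ],
)
fs = HfFileSystem(repo_info=stub_repo_info)
print(fs.ls("", detail=False))    # ['README.md', 'data']
print(fs.info("data/train.csv"))  # {'name': 'data/train.csv', 'size': None, 'type': 'file'}
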
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = "char"
_UpperCAmelCase = "bpe"
_UpperCAmelCase = "wp"
__UpperCamelCase : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = ["image_processor", "char_tokenizer"]
_UpperCAmelCase = "ViTImageProcessor"
_UpperCAmelCase = "MgpstrTokenizer"
def __init__( self: Optional[int] , UpperCamelCase: Dict=None , UpperCamelCase: Any=None , **UpperCamelCase: Any ) -> str:
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
snake_case__ = kwargs.pop('feature_extractor' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
snake_case__ = tokenizer
snake_case__ = AutoTokenizer.from_pretrained('gpt2' )
snake_case__ = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self: str , UpperCamelCase: List[str]=None , UpperCamelCase: Any=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Optional[int] ) -> List[str]:
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
snake_case__ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None:
snake_case__ = self.char_tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['input_ids']
return inputs
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[str] ) -> int:
snake_case__ , snake_case__ , snake_case__ = sequences
snake_case__ = char_preds.size(0 )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'char' )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'bpe' )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'wp' )
snake_case__ = []
snake_case__ = []
for i in range(UpperCamelCase ):
snake_case__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case__ = scores.index(max(UpperCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case__ = {}
snake_case__ = final_strs
snake_case__ = final_scores
snake_case__ = char_strs
snake_case__ = bpe_strs
snake_case__ = wp_strs
return out
def lowerCAmelCase_ ( self: str , UpperCamelCase: str , UpperCamelCase: Tuple ) -> Optional[int]:
if format == DecodeType.CHARACTER:
snake_case__ = self.char_decode
snake_case__ = 1
snake_case__ = '[s]'
elif format == DecodeType.BPE:
snake_case__ = self.bpe_decode
snake_case__ = 2
snake_case__ = '#'
elif format == DecodeType.WORDPIECE:
snake_case__ = self.wp_decode
snake_case__ = 1_02
snake_case__ = '[SEP]'
else:
raise ValueError(F'''Format {format} is not supported.''' )
snake_case__ , snake_case__ = [], []
snake_case__ = pred_logits.size(0 )
snake_case__ = pred_logits.size(1 )
snake_case__ , snake_case__ = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase , sorted=UpperCamelCase )
snake_case__ = preds_index.view(-1 , UpperCamelCase )[:, 1:]
snake_case__ = decoder(UpperCamelCase )
snake_case__ , snake_case__ = torch.nn.functional.softmax(UpperCamelCase , dim=2 ).max(dim=2 )
snake_case__ = preds_max_prob[:, 1:]
for index in range(UpperCamelCase ):
snake_case__ = preds_str[index].find(UpperCamelCase )
snake_case__ = preds_str[index][:pred_eos]
snake_case__ = preds_index[index].cpu().tolist()
snake_case__ = pred_index.index(UpperCamelCase ) if eos_token in pred_index else -1
snake_case__ = preds_max_prob[index][: pred_eos_index + 1]
snake_case__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase )
conf_scores.append(UpperCamelCase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: str ) -> int:
snake_case__ = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
def lowerCAmelCase_ ( self: int , UpperCamelCase: Optional[int] ) -> Dict:
return self.bpe_tokenizer.batch_decode(UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: str ) -> Union[str, Any]:
snake_case__ = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
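
# Toy illustration of the ensemble rule used in batch_decode above: each of
# the char/bpe/wp decoders proposes a string with a cumulative confidence, and
# the highest-confidence proposal wins per sample. (Values are made up.)
char_scores, bpe_scores, wp_scores = [0.91], [0.85], [0.88]
char_strs, bpe_strs, wp_strs = ["ticket"], ["ticke#"], ["ticket"]

final_strs = []
for i in range(1):  # batch size 1 in this toy
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    final_strs.append(strs[scores.index(max(scores))])
assert final_strs == ["ticket"]
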
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def __lowercase( __snake_case : Optional[Any] ) -> Union[str, Any]:
if hor == 1_28:
__snake_case = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
__snake_case = (32, 1_28, 2_56)
__snake_case = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
__snake_case = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
__snake_case = (32, 64, 1_28, 2_56)
__snake_case = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
__snake_case = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
__snake_case = model.state_dict()
__snake_case = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
__snake_case = UNetaDModel(**__snake_case )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
__snake_case = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__snake_case = state_dict.pop(__snake_case )
hf_value_function.load_state_dict(__snake_case )
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
json.dump(__snake_case ,__snake_case )
def __lowercase( ) -> Union[str, Any]:
__snake_case = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
__snake_case = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
__snake_case = model
__snake_case = UNetaDModel(**__snake_case )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
__snake_case = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__snake_case = state_dict.pop(__snake_case )
hf_value_function.load_state_dict(__snake_case )
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
json.dump(__snake_case ,__snake_case )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
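
# The conversion script above relies on the source and target state dicts
# listing their keys in the same order; this toy shows the zip-based renaming
# trick on plain tensors (names are made up):
import torch

src = {"old.weight": torch.zeros(2), "old.bias": torch.ones(2)}
dst_keys = ["new.weight", "new.bias"]
mapping = dict(zip(src.keys(), dst_keys))
renamed = {mapping[k]: v for k, v in src.items()}
assert list(renamed) == dst_keys
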
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase (lowerCamelCase , unittest.TestCase ):
lowercase__ = GPTSanJapaneseTokenizer
lowercase__ = False
lowercase__ = {"""do_clean_text""": False, """add_prefix_space""": False}
def __lowerCamelCase ( self ):
super().setUp()
# fmt: off
__snake_case = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
__snake_case = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
__snake_case = {'unk_token': '<unk>'}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE_ ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
__snake_case = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case , __snake_case = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def __lowerCamelCase ( self ):
pass # TODO add if relevant
def __lowerCamelCase ( self ):
pass # TODO add if relevant
def __lowerCamelCase ( self ):
pass # TODO add if relevant
def __lowerCamelCase ( self ):
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = 'こんにちは、世界。 こんばんは、㔺界。'
__snake_case = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
__snake_case = 'こんにちは、、、、世界。こんばんは、、、、世界。'
__snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__snake_case = 'こんにちは、世界。'
__snake_case = 'こんばんは、㔺界。😀'
__snake_case = 'こんにちは、世界。こんばんは、世界。😀'
__snake_case = tokenizer.encode(prefix_text + input_text )
__snake_case = tokenizer.encode('' , prefix_text=prefix_text + input_text )
__snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__snake_case = 'こんにちは、世界。'
__snake_case = 'こんばんは、㔺界。😀'
__snake_case = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
__snake_case = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
__snake_case = [1] + [0] * (len_prefix + len_text + 1)
__snake_case = [1] * (len_prefix + len_text + 1) + [0]
__snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__snake_case = tokenizer.encode('あンいワ' )
__snake_case = tokenizer.encode('' , prefix_text='あンいワ' )
__snake_case = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__snake_case = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
__snake_case = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
__snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCamelCase ( self ):
# tokenizer has no padding token
pass
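
# Toy reconstruction of the token_type_ids layouts the prefix tests check:
# tokens in the prefix segment get type 1, generated-segment tokens get type 0.
len_prefix, len_text = 3, 4
no_prefix = [1] + [0] * (len_prefix + len_text + 1)
all_prefix = [1] * (len_prefix + len_text + 1) + [0]
split = [1] + [1] * len_prefix + [0] * (len_text + 1)
assert len(no_prefix) == len(all_prefix) == len(split)
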
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
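
# Illustrative call pattern for the processor above. Network access and model
# weights are needed, so treat this as a sketch; "BAAI/AltCLIP" is one public
# checkpoint that ships this processor class.
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_values
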
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 10_0000:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10 | 564 | 0 |
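    # Added sanity check (illustrative): for the straight line f(x) = x the arc
    # length over [0, 1] is exactly sqrt(2), so the approximation must match it.
    assert abs(line_length(lambda x: x, 0, 1, 100) - math.sqrt(2)) < 1e-9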
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
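# --- Added example (illustrative; the command name and behavior are invented) ---
# A concrete command implements the two hooks: register its CLI arguments, then run.
class EchoCommand(a):
    def __init__(self, text: str):
        self._text = text

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("text", help="text to echo back")

    def run(self):
        print(self._text)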
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
a : Tuple = f'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
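# --- Added note (illustrative) ---
# The AttributeError fallback above exists because, without JavaScript, result
# anchors point at an internal redirect whose query string carries the real
# destination; parse_qs recovers it. A standalone sketch with an invented URL:
#
#   from urllib.parse import parse_qs, urlparse
#
#   redirect = "/url?url=https://example.com/page&sa=U"  # invented example
#   print(parse_qs(urlparse(redirect).query)["url"][0])  # https://example.com/page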
| 593 | 1 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
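# --- Added example (illustrative) ---
# The same search runs on any obstacle grid; with an all-zero heuristic the
# f-cost reduces to g, i.e. a uniform-cost (Dijkstra-like) search.
open_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
zero_heuristic = [[0] * 3 for _ in range(3)]
demo_path, _ = search(open_grid, [0, 0], [2, 2], 1, zero_heuristic)
print(demo_path)  # a shortest path around the central obstacle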
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
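# --- Added note (illustrative) ---
# With this pattern `import transformers.models.whisper` stays cheap: the heavy
# torch/tf/flax submodules are imported only when an attribute is first touched.
#
#   from transformers.models import whisper
#
#   tok_cls = whisper.WhisperTokenizer  # the real submodule loads lazily here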
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
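# --- Added cross-check (illustrative) ---
# The pure-Python codec above should agree with the standard library.
import base64 as _stdlib_base64

for _sample in (b"", b"f", b"fo", b"foo", b"Hello, World!"):
    assert base64_encode(_sample) == _stdlib_base64.b64encode(_sample)
    assert base64_decode(base64_encode(_sample)) == _sample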
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        '''simple docstring'''
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        '''simple docstring'''
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        '''simple docstring'''
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
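# --- Added sketch (hedged): a bare-bones reverse-diffusion loop with this
# scheduler. `dummy_model` is a placeholder epsilon predictor, not a real net.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25)

    sample = torch.randn(1, 4, 8, 8)  # start from pure noise

    def dummy_model(x, t):
        return torch.zeros_like(x)  # stand-in for a trained epsilon model

    for t in scheduler.timesteps:
        noise_pred = dummy_model(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample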
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SCREAMING_SNAKE_CASE (FeatureExtractionSavingTestMixin ):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        '''simple docstring'''
        return self.feat_extract_tester.prepare_feat_extract_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'feature_size'))
self.assertTrue(hasattr(_UpperCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(_UpperCAmelCase , 'padding_value'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.feat_extract_tester.prepare_inputs_for_common()
__A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(_UpperCAmelCase) == len(_UpperCAmelCase) for x, y in zip(_UpperCAmelCase , processed_features[input_name])))
__A : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase)
__A : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='np')
__A : Dict = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__A : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase)
__A : Tuple = self.feature_extraction_class(**self.feat_extract_dict)
__A : Dict = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
__A : Tuple = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__A : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase)
__A : str = self.feature_extraction_class(**self.feat_extract_dict)
__A : Dict = feat_extract.model_input_names[0]
__A : int = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
__A : Tuple = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__A : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=False):
'''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
__A : str = self.feature_extraction_class(**self.feat_extract_dict)
__A : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase)
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Tuple = BatchFeature({input_name: speech_inputs})
__A : str = self.feat_extract_tester.seq_length_diff
__A : Tuple = self.feat_extract_tester.max_seq_length + pad_diff
__A : Union[str, Any] = self.feat_extract_tester.min_seq_length
__A : Union[str, Any] = self.feat_extract_tester.batch_size
__A : Any = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__A : str = feat_extract.pad(_UpperCAmelCase , padding=_UpperCAmelCase)
__A : Optional[Any] = input_a[input_name]
__A : List[Any] = feat_extract.pad(_UpperCAmelCase , padding='longest')
__A : Optional[int] = input_a[input_name]
__A : Optional[Any] = feat_extract.pad(_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
__A : List[str] = input_a[input_name]
__A : Tuple = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')
__A : List[str] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='max_length')[input_name]
__A : Any = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='np')
__A : str = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__A : Dict = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=10)
__A : Optional[int] = input_a[input_name]
__A : Any = feat_extract.pad(_UpperCAmelCase , padding='longest' , pad_to_multiple_of=10)
__A : Optional[Any] = input_a[input_name]
__A : List[str] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase)
__A : Optional[int] = input_a[input_name]
__A : Optional[Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase , return_tensors='np' , )
__A : str = input_a[input_name]
self.assertTrue(all(len(_UpperCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_UpperCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__A : int = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=False):
'''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
__A : str = self.feature_extraction_class(**self.feat_extract_dict)
__A : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase)
__A : Optional[int] = feat_extract.model_input_names[0]
__A : Union[str, Any] = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__A : Tuple = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=_UpperCAmelCase)
__A : Tuple = input_a[input_name]
__A : int = feat_extract.pad(_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
__A : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
# truncate to smallest with np
__A : Tuple = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=_UpperCAmelCase , )
__A : List[Any] = input_a[input_name]
__A : Tuple = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
__A : Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
# truncate to middle
__A : str = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=_UpperCAmelCase , return_tensors='np' , )
__A : List[str] = input_a[input_name]
__A : Optional[Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=_UpperCAmelCase)
__A : Any = input_a[input_name]
__A : Any = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
__A : Tuple = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , truncation=_UpperCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='longest' , truncation=_UpperCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='longest' , truncation=_UpperCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_UpperCAmelCase):
feat_extract.pad(_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__A : int = 12
__A : Optional[Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
__A : str = input_a[input_name]
__A : Union[str, Any] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=_UpperCAmelCase , )
__A : Optional[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__A : List[str] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__A : Optional[Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase))
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_padding(numpify=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_padding(numpify=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_truncation(numpify=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._check_truncation(numpify=_UpperCAmelCase)
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__A : int = self.feat_extract_tester.prepare_inputs_for_common()
__A : Union[str, Any] = feat_extract.model_input_names[0]
__A : str = BatchFeature({input_name: speech_inputs})
__A : int = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')[input_name]
__A : Optional[int] = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
__A : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Union[str, Any] = BatchFeature({input_name: speech_inputs})
__A : Optional[Any] = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')[input_name]
__A : str = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.feat_extract_dict
__A : List[str] = True
__A : List[Any] = self.feature_extraction_class(**_UpperCAmelCase)
__A : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__A : Optional[Any] = [len(_UpperCAmelCase) for x in speech_inputs]
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs})
__A : List[str] = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.feat_extract_dict
__A : Optional[int] = True
__A : List[str] = self.feature_extraction_class(**_UpperCAmelCase)
__A : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
__A : str = [len(_UpperCAmelCase) for x in speech_inputs]
__A : Optional[Any] = feat_extract.model_input_names[0]
__A : List[Any] = BatchFeature({input_name: speech_inputs})
__A : Dict = min(_UpperCAmelCase)
__A : List[str] = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
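# --- Added note (illustrative) ---
# Concrete test classes bind the two attributes the mixin leaves as None,
# roughly like the hypothetical sketch below:
#
#   class MyFeatureExtractionTest(SCREAMING_SNAKE_CASE, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#
#       def setUp(self):
#           self.feat_extract_tester = MyFeatureExtractionTester(self)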
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
a_ : Union[str, Any] = 'resnet18'
a_ : Dict = 'microsoft/resnet-18'
a_ : Union[str, Any] = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ , use_timm_backbone=SCREAMING_SNAKE_CASE__ )
a_ : Dict = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ : Dict = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ , use_timm_backbone=SCREAMING_SNAKE_CASE__ , out_indices=[1, 2, 3] )
a_ : int = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : List[Any] = [*signature.parameters.keys()]
a_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a_ : List[str] = True
a_ : Dict = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ : Optional[Any] = self.all_model_classes[0]
a_ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Any = model(**SCREAMING_SNAKE_CASE__ )
a_ : Any = outputs[0][-1]
# Encoder-/Decoder-only models
a_ : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ : Tuple = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = None
a_ : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ : Dict = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = False
a_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )

    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
)
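# --- Added worked example (illustrative) ---
# For "banana" the last column of the sorted rotations spells the transform and
# the original string sits at index 3; the inverse recovers the input exactly.
_demo = bwt_transform("banana")
assert _demo == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt(_demo["bwt_string"], _demo["idx_original_string"]) == "banana"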
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __A :
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class __A ( unittest.TestCase ):
@require_torch
def _lowercase (self : int ):
UpperCAmelCase_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase_ = image_classifier(__a , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__a ) , [
[{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "b"}, {"score": 0.3_33, "label": "c"}],
[{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "c"}, {"score": 0.3_33, "label": "b"}],
] , )
UpperCAmelCase_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
] , )
@require_tf
def _lowercase (self : List[str] ):
UpperCAmelCase_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase_ = image_classifier(__a , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(__a ) , [{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "b"}, {"score": 0.3_33, "label": "c"}] , )
UpperCAmelCase_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
[
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
{"score": 0.3_33, "label": ANY(__a )},
],
] , )
@slow
@require_torch
def _lowercase (self : Dict ):
UpperCAmelCase_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase_ = image_classifier(__a , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__a ) , [
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
] , )
UpperCAmelCase_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase_ = image_classifier(__a , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__a ) , [
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
] , )
UpperCAmelCase_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
],
]
* 5 , )
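# --- Added usage sketch (illustrative) ---
# Outside the test harness the same check is two lines; it needs the (large)
# CLIP checkpoint and a local image, so it is left as a comment:
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   print(classifier("cat.png", candidate_labels=["cat", "plane", "remote"]))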
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
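    # Added spot checks (illustrative) against the table above.
    assert energy_conversion("joule", "kilojoule", 1000) == 1.0
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0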
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCAmelCase = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCAmelCase = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    '''simple docstring'''
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    '''simple docstring'''
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str, genes: list[str]) -> str:
    '''simple docstring'''
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_a: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]

        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    '''simple docstring'''
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_lowerCAmelCase : Any = [evaluate(_lowerCamelCase , _lowerCamelCase ) for item in population]
# Check if there is a matching evolution.
_lowerCAmelCase : Tuple = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] , reverse=_lowerCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_lowerCAmelCase : Tuple = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowerCamelCase )
# Normalize population score to be between 0 and 1.
_lowerCAmelCase : int = [
(item, score / len(_lowerCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_lowerCamelCase ):
population.extend(select(population_score[int(_lowerCamelCase )] , _lowerCamelCase , _lowerCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowerCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Pull the user dict out of Instagram's embedded sharedData script tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Scrape Instagram profile information for a given username."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user info dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=sp_model_kwargs , **kwargs , )
        self.sp_model_kwargs = sp_model_kwargs
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self):
        return len(self.sp_model )

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        """Tokenize a string with SentencePiece, re-splitting digits followed by a comma."""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs, ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )

        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts )

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
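    # Layout sketch for the method above (a clarifying note, not part of the
    # upstream docs): XLNet appends its special tokens at the end.
    #   single sequence:   A A A <sep> <cls>      ->  type ids  0 0 0 0 | 2
    #   pair of sequences: A <sep> B <sep> <cls>  ->  0 ... 0 | 1 ... 1 | 2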
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
| 237 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""DPTFeatureExtractor"""]
SCREAMING_SNAKE_CASE_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running" )
            sentence_len = len(inputs["input_ids"] ) - 1
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )

            inputs = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 714 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def lowerCamelCase__ ( )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = int(os.environ["RANK"] )
UpperCamelCase = int(os.environ["WORLD_SIZE"] )
UpperCamelCase = ArgumentParser()
parser.add_argument("--streaming" , type=UpperCAmelCase_ )
parser.add_argument("--local_rank" , type=UpperCAmelCase_ )
parser.add_argument("--num_workers" , type=UpperCAmelCase_ , default=0 )
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.streaming
UpperCamelCase = args.num_workers
UpperCamelCase = {"shards": [F"shard_{shard_idx}" for shard_idx in range(UpperCAmelCase_ )]}
UpperCamelCase = IterableDataset.from_generator(UpperCAmelCase_ , gen_kwargs=UpperCAmelCase_ )
if not streaming:
UpperCamelCase = Dataset.from_list(list(UpperCAmelCase_ ) )
UpperCamelCase = split_dataset_by_node(UpperCAmelCase_ , rank=UpperCAmelCase_ , world_size=UpperCAmelCase_ )
UpperCamelCase = torch.utils.data.DataLoader(UpperCAmelCase_ , num_workers=UpperCAmelCase_ )
UpperCamelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCamelCase = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
UpperCamelCase = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
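# Hypothetical launch sketch (the script name and flag spelling are
# illustrative; any multi-process torch launcher that sets RANK/WORLD_SIZE
# works):
#
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True
#
# Each rank then verifies it received its share of the
# NUM_SHARDS * NUM_ITEMS_PER_SHARD examples.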
if __name__ == "__main__":
main()
| 556 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running" )
            sentence_len = len(inputs["input_ids"] ) - 1
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )

            inputs = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 583 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
snake_case__ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 583 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element to (0, 1) via the logistic function."""
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: x * sigmoid(x)."""
    return vector * sigmoid(vector )
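# Quick numeric check of the SiLU definition above (values rounded; shown as
# doctest-style comments):
#
#   >>> sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))
#   array([-0.26894142,  0.        ,  0.73105858])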
if __name__ == "__main__":
import doctest
doctest.testmod()
| 554 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Rewire one node's inputs from `name` to `new_name`, recursing into subgraphs."""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Remove the duplicated initializer and point every consumer at the kept one.
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializer tensors and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )

    print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
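# Minimal usage sketch (the path is illustrative; requires the `onnx` package):
#
#   optimized_path = remove_dup_initializers("exported/encoder.onnx")
#   # -> writes exported/optimized_encoder.onnx and returns its path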
| 554 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self ) -> str:
UpperCAmelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> int:
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''apply_ocr''' ) )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 18} )
UpperCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
def a__ ( self ) -> Any:
pass
def a__ ( self ) -> Tuple:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
self.assertIsInstance(encoding.words ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes ,_SCREAMING_SNAKE_CASE )
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : str = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Optional[int] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def a__ ( self ) -> int:
# with apply_OCR = True
UpperCAmelCase_ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase_ : str = load_dataset('''hf-internal-testing/fixtures_docvqa''' ,split='''test''' )
UpperCAmelCase_ : Tuple = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCAmelCase_ : Tuple = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ : List[str] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCAmelCase_ : Optional[int] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes ,_SCREAMING_SNAKE_CASE )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )

        encoding = image_processing(image , return_tensors="pt" )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 30 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
snake_case__ : Optional[Any] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 402 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read a yes/no environment flag, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no." )
    return _value
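# Quick sketch of how these flags are driven from the shell (hypothetical
# invocations; any value accepted by strtobool such as "yes"/"1"/"true" works):
#
#   RUN_SLOW=1 pytest tests/     -> parse_flag_from_env("RUN_SLOW") is True
#   RUN_SLOW=no pytest tests/    -> False
#   (unset)                      -> falls back to `default`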
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case )
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case )
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case )
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case )
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case )
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case )
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case )
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case )
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case )
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case )
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case )
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model )
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case )
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model ) )(test_case )
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case )
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case )
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case )
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case )
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case )
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case )
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls

    return decorate
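# Usage sketch for the decorator factory above (the markers and the test class
# are illustrative, not from this file):
#
#   @for_all_test_methods(xfail_if_500_502_http_error)
#   class HubTests(unittest.TestCase):
#       def test_push(self): ...
#       def test_pull(self): ...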
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f"OfflineMock[{url}]" ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
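# Usage sketch (the dataset name is illustrative):
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       load_dataset("squad")   # raises requests.ConnectionError instead of
#                               # reaching the Hub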
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper , func )
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}" )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output." )

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 for `gw0`)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(r"^gw" , "" , worker , 0 , re.M )
    return int(worker )


def pytest_xdist_port():
    """Derive a unique torch.distributed port per xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 715 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there" )
        self.assertEqual(outputs , [{"generated_text": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )

        outputs = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs ,
            [
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
            ] , )

        outputs = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs ,
            [
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
            ] , )

        with self.assertRaises(ValueError ):
            generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length) ) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2 , length=length )

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
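    # Worked numbers for intuition (rounded, computed from the definitions
    # above; the [2, 1] logits are illustrative):
    #   temperature 0.5 sharpens: softmax([2, 1] / 0.5) = softmax([4, 2]) ~ [0.88, 0.12]
    #   temperature 1.3 smooths:  softmax([2, 1] / 1.3)                   ~ [0.68, 0.32]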
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = None
lowerCAmelCase__ = 10
lowerCAmelCase__ = 2
# create ramp distribution
lowerCAmelCase__ = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy()
lowerCAmelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase__ = FlaxTopKLogitsWarper(3 )
lowerCAmelCase__ = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase__ = 5
lowerCAmelCase__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCAmelCase__ = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, length) ).copy()
lowerCAmelCase__ = top_k_warp_safety_check(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = None
lowerCAmelCase__ = 10
lowerCAmelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase__ = np.exp(top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase__ = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCAmelCase__ = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = 20
lowerCAmelCase__ = 4
lowerCAmelCase__ = 0
lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
# check that min length is applied at length 5
lowerCAmelCase__ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCAmelCase__ = 5
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = 15
lowerCAmelCase__ = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = 20
lowerCAmelCase__ = 4
lowerCAmelCase__ = 0
lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase__ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCAmelCase__ = 1
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase__ = 3
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = 20
lowerCAmelCase__ = 4
lowerCAmelCase__ = 0
lowerCAmelCase__ = 5
lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase__ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCAmelCase__ = 4
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase__ = 3
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = 4
lowerCAmelCase__ = 10
lowerCAmelCase__ = 15
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
lowerCAmelCase__ = 15
# dummy input_ids and scores
lowerCAmelCase__ = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ )
lowerCAmelCase__ = input_ids.copy()
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = scores.copy()
# instantiate all dist processors
lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase__ = FlaxTopKLogitsWarper(3 )
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
lowerCAmelCase__ = 10
# no processor list
lowerCAmelCase__ = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# with processor list
lowerCAmelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase__ = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = 4
lowerCAmelCase__ = 10
lowerCAmelCase__ = 15
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
lowerCAmelCase__ = 15
# dummy input_ids and scores
lowerCAmelCase__ = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ )
lowerCAmelCase__ = input_ids.copy()
lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = scores.copy()
# instantiate all dist processors
lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase__ = FlaxTopKLogitsWarper(3 )
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
lowerCAmelCase__ = 10
# no processor list
def run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
lowerCAmelCase__ = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
return scores
# with processor list
def run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase__ = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
return scores
lowerCAmelCase__ = jax.jit(lowerCamelCase_ )
lowerCAmelCase__ = jax.jit(lowerCamelCase_ )
lowerCAmelCase__ = jitted_run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = jitted_run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 90 |
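# A plain-numpy sketch of the two warpers asserted above (illustrative names,
# not the transformers implementation): top-k keeps the k largest logits,
# top-p keeps the smallest prefix of the sorted distribution whose mass
# reaches p; everything else becomes -inf.
import numpy as np
def top_k_filter(logits, k, filter_value=-np.inf):
    out = logits.copy()
    kth_largest = np.sort(out)[-k]          # threshold logit
    out[out < kth_largest] = filter_value   # ties at the threshold survive
    return out
def top_p_filter(logits, p, filter_value=-np.inf):
    order = np.argsort(logits)[::-1]        # tokens, most to least likely
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    mass_before = np.cumsum(probs[order]) - probs[order]
    drop_sorted = mass_before >= p          # drop once p is already covered
    out = logits.copy()
    out[order[drop_sorted]] = filter_value
    return out
logits = np.log(np.array([0.3, 0.1, 0.1, 0.5]))
print(top_k_filter(logits, k=2))    # keeps the 0.5 and 0.3 tokens
print(top_p_filter(logits, p=0.79)) # same two tokens; 0.79 sidesteps float ties at exactly 0.8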
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]:
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _snake_case ( A , A ) -> List[str]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _snake_case ( A ) -> List[str]:
lowerCAmelCase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def _snake_case ( A , A , A ) -> str:
lowerCAmelCase__ = dct.pop(A )
lowerCAmelCase__ = val
@torch.no_grad()
def _snake_case ( A , A ) -> Any:
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 1 |
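# Shape-only sketch of the read_in_q_k_v step above: timm-style checkpoints
# store attention as one fused (3*hidden, hidden) qkv projection, while the
# HF layout wants separate query/key/value tensors. Toy hidden size only.
import torch
hidden_size = 8  # ViLT itself uses 768
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b, key_b, value_b = in_proj_bias.split(hidden_size)
assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)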
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
SCREAMING_SNAKE_CASE_ : Tuple = 1_6
SCREAMING_SNAKE_CASE_ : str = 3_2
def _snake_case ( UpperCAmelCase_ : Accelerator , UpperCAmelCase_ : int = 16 ):
A__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
A__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCAmelCase_ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCAmelCase_ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
UpperCAmelCase_ , padding="""longest""" , max_length=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
A__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
SCREAMING_SNAKE_CASE_ : Dict = mocked_dataloaders # noqa: F811
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCAmelCase_ ) == "1":
A__ = 2
# New Code #
A__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCAmelCase_ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["""lr"""]
A__ = int(config["""num_epochs"""] )
A__ = int(config["""seed"""] )
A__ = int(config["""batch_size"""] )
A__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(UpperCAmelCase_ )
A__ , A__ = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=UpperCAmelCase_ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Now we train the model
for epoch in range(UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(UpperCAmelCase_ ):
A__ = model(**UpperCAmelCase_ )
A__ = output.loss
accelerator.backward(UpperCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCAmelCase_ )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase_ )
def _snake_case ( ):
A__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=UpperCAmelCase_ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
A__ = parser.parse_args()
A__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
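# A plain-PyTorch sketch of what accelerator.accumulate() achieves above:
# scale each micro-batch loss by 1/steps and only step the optimizer every
# `steps` micro-batches. Toy model and data; names are illustrative.
import torch
from torch import nn
toy_model = nn.Linear(4, 1)
opt = torch.optim.AdamW(toy_model.parameters(), lr=1e-3)
steps = 4  # plays the role of gradient_accumulation_steps
for i in range(16):
    x, y = torch.randn(2, 4), torch.randn(2, 1)
    loss = nn.functional.mse_loss(toy_model(x), y) / steps  # average over micro-batches
    loss.backward()                                         # grads accumulate in .grad
    if (i + 1) % steps == 0:
        opt.step()
        opt.zero_grad()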
| 500 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 0
while number > 0:
A__ = number % 10
sum_of_digits += last_digit
A__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _snake_case ( UpperCAmelCase_ : int = 100 ):
A__ = factorial(UpperCAmelCase_ )
A__ = split_and_add(UpperCAmelCase_ )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
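# Standard-library cross-check: the digit sum of 100! is 648 (Project Euler
# problem 20), matching the divmod loop above.
import math
print(sum(int(digit) for digit in str(math.factorial(100))))  # 648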
| 500 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
# Construct model
if openai_config_file == "":
lowerCamelCase_ = OpenAIGPTConfig()
else:
lowerCamelCase_ = OpenAIGPTConfig.from_json_file(__UpperCamelCase )
lowerCamelCase_ = OpenAIGPTModel(__UpperCamelCase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Save pytorch-model
lowerCamelCase_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase_ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() ,__UpperCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
A_ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
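# Hypothetical invocation, assuming this script is saved as
# convert_openai_gpt_checkpoint.py (both paths are placeholders):
#
#   python convert_openai_gpt_checkpoint.py \
#       --openai_checkpoint_folder_path /path/to/tf/checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch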
| 42 |
'''simple docstring'''
from math import isclose, sqrt
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float, float]:
lowerCamelCase_ = point_y / 4 / point_x
lowerCamelCase_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase_ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase_ = outgoing_gradient**2 + 4
lowerCamelCase_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase_ = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
lowerCamelCase_ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase_ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase_ = x_minus if isclose(__UpperCamelCase ,__UpperCamelCase ) else x_plus
lowerCamelCase_ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _UpperCamelCase ( __UpperCamelCase = 1.4 ,__UpperCamelCase = -9.6 ) -> int:
lowerCamelCase_ = 0
lowerCamelCase_ = first_x_coord
lowerCamelCase_ = first_y_coord
lowerCamelCase_ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = next_point(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
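# Numeric sanity check (illustrative, not part of the solution): the rotation
# coefficients used in next_point reflect the incoming line across the ellipse
# normal, so the angle to the normal is preserved. Checked at the start point.
import math
x, y = 1.4, -9.6
incoming = (10.1 - y) / (0.0 - x)     # gradient of the first beam
normal = y / (4 * x)                  # gradient of the normal to 4x^2 + y^2 = 100
s = 2 * normal / (1 + normal**2)      # sine/cosine of twice the normal's angle
c = (1 - normal**2) / (1 + normal**2)
outgoing = (s - c * incoming) / (c + s * incoming)
def angle_between(ma, mb):
    return math.atan(abs((ma - mb) / (1 + ma * mb)))
assert math.isclose(angle_between(incoming, normal), angle_between(outgoing, normal))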
| 42 | 1 |
import numpy as np
UpperCAmelCase_ : List[Any] = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class lowercase__ :
def __init__( self ):
lowerCAmelCase_ : Dict = np.array(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = np.where(letter == self.SQUARE )
lowerCAmelCase_ : Any = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def UpperCAmelCase__ ( self , _lowercase , _lowercase ):
lowerCAmelCase_ : List[Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def UpperCAmelCase__ ( self , _lowercase ):
lowerCAmelCase_ : List[str] = message.lower()
lowerCAmelCase_ : Any = message.replace(""" """ , """""" )
lowerCAmelCase_ : Union[str, Any] = message.replace("""j""" , """i""" )
lowerCAmelCase_ : Dict = np.empty((2, len(_lowercase )) )
for letter_index in range(len(_lowercase ) ):
lowerCAmelCase_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase_ : List[str] = numbers[0]
lowerCAmelCase_ : Optional[int] = numbers[1]
lowerCAmelCase_ : List[str] = first_step.reshape(2 * len(_lowercase ) )
lowerCAmelCase_ : List[Any] = """"""
for numbers_index in range(len(_lowercase ) ):
lowerCAmelCase_ : List[str] = int(second_step[numbers_index * 2] )
lowerCAmelCase_ : Dict = int(second_step[(numbers_index * 2) + 1] )
lowerCAmelCase_ : Tuple = self.numbers_to_letter(_lowercase , _lowercase )
lowerCAmelCase_ : Dict = encoded_message + letter
return encoded_message
def UpperCAmelCase__ ( self , _lowercase ):
lowerCAmelCase_ : str = message.lower()
message.replace(""" """ , """""" )
lowerCAmelCase_ : Optional[int] = np.empty(2 * len(_lowercase ) )
for letter_index in range(len(_lowercase ) ):
lowerCAmelCase_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase_ : List[Any] = numbers[0]
lowerCAmelCase_ : List[Any] = numbers[1]
lowerCAmelCase_ : Dict = first_step.reshape((2, len(_lowercase )) )
lowerCAmelCase_ : Any = """"""
for numbers_index in range(len(_lowercase ) ):
lowerCAmelCase_ : Optional[int] = int(second_step[0, numbers_index] )
lowerCAmelCase_ : Any = int(second_step[1, numbers_index] )
lowerCAmelCase_ : Optional[Any] = self.numbers_to_letter(_lowercase , _lowercase )
lowerCAmelCase_ : List[Any] = decoded_message + letter
return decoded_message
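# Standalone sketch of the Bifid step the class above implements: map each
# letter to (row, col) on the 5x5 Polybius square, write all rows followed by
# all columns, re-pair the digits, and map back to letters. 'j' folds into 'i'.
SQUARE_FLAT = "abcdefghiklmnopqrstuvwxyz"  # the 5x5 square, flattened
def bifid_encode(message: str) -> str:
    msg = message.lower().replace(" ", "").replace("j", "i")
    coords = [divmod(SQUARE_FLAT.index(ch), 5) for ch in msg]
    mixed = [r for r, _ in coords] + [c for _, c in coords]
    return "".join(SQUARE_FLAT[mixed[2 * i] * 5 + mixed[2 * i + 1]] for i in range(len(msg)))
print(bifid_encode("testmessage"))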
| 440 |
from collections import namedtuple
UpperCAmelCase_ : Union[str, Any] = namedtuple("""from_to""", """from_ to""")
UpperCAmelCase_ : int = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 10_00),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 2_64.1_72),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 42_26.75),
}
def _lowerCAmelCase ( _a : float , _a : str , _a : str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'
+ """, """.join(_a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ """, """.join(_a ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
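# Worked example of the pivot above (illustrative names): convert into cubic
# metres via the `from_` factor, then out via the `to` factor. Note the gallon
# row mixes an imperial inbound factor (0.00454 m^3/gal) with a US outbound
# one (264.172 gal/m^3).
FROM_TO = {"litre": (0.001, 1_000), "gallon": (0.004_54, 264.172)}
value, src, dst = 4, "litre", "gallon"
cubic_metres = value * FROM_TO[src][0]
print(cubic_metres * FROM_TO[dst][1])  # ~1.057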
| 440 | 1 |
'''simple docstring'''
class __UpperCamelCase :
def __init__( self :List[Any] ):
snake_case_ : Union[str, Any] = {}
def a__ ( self :List[Any] ):
print(self.vertex )
for i in self.vertex:
print(_UpperCAmelCase ,""" -> """ ,""" -> """.join([str(_UpperCAmelCase ) for j in self.vertex[i]] ) )
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ):
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_UpperCAmelCase )
else:
# else make a new vertex
snake_case_ : Tuple = [to_vertex]
def a__ ( self :Dict ):
snake_case_ : str = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_UpperCAmelCase ,_UpperCAmelCase )
def a__ ( self :Tuple ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ):
snake_case_ : Dict = True
print(_UpperCAmelCase ,end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_UpperCAmelCase ,_UpperCAmelCase )
if __name__ == "__main__":
__A : Any = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 334 |
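# Iterative counterpart (sketch) of the recursive DFS above, using an explicit
# stack; same adjacency-dict shape, and it avoids recursion-depth limits.
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        stack.extend(reversed(graph.get(node, [])))  # keep left-to-right visit order
    return order
print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]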
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[Any]: # noqa: E741
__A : Tuple = len(__snake_case )
__A : Optional[int] = 0
__A : str = [0] * n
__A : int = [False] * n
__A : Tuple = [False] * n
def dfs(__snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int ):
if parent == root:
out_edge_count += 1
__A : str = True
__A : Tuple = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__A : Optional[int] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
__A : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__A : Tuple = True
# AP found via cycle
if at == low[to]:
__A : Optional[Any] = True
else:
__A : Any = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
__A : Tuple = 0
__A : List[Any] = dfs(__snake_case , __snake_case , -1 , __snake_case )
__A : Union[str, Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 0 |
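# Optional cross-check (requires networkx): a reference implementation finds
# the same articulation points for the adjacency list above.
import networkx as nx
G = nx.Graph({0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8]})
print(sorted(nx.articulation_points(G)))  # [2, 3, 5]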
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
def __init__( self , _lowercase , _lowercase=3 , _lowercase=32 , _lowercase=3 , _lowercase=10 , _lowercase=[10, 20, 30, 40] , _lowercase=[1, 1, 2, 1] , _lowercase=True , _lowercase=True , _lowercase="relu" , _lowercase=3 , _lowercase=None , ):
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : int = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : Optional[int] = embeddings_size
lowerCAmelCase_ : Optional[int] = hidden_sizes
lowerCAmelCase_ : str = depths
lowerCAmelCase_ : str = is_training
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Optional[int] = num_labels
lowerCAmelCase_ : Dict = scope
lowerCAmelCase_ : Union[str, Any] = len(_A )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ):
lowerCAmelCase_ : Optional[Any] = TFResNetModel(config=_A )
lowerCAmelCase_ : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ):
lowerCAmelCase_ : Any = self.num_labels
lowerCAmelCase_ : Dict = TFResNetForImageClassification(_A )
lowerCAmelCase_ : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCamelCase = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : List[str] = TFResNetModelTester(self )
lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=_A , has_text_modality=_A )
def UpperCAmelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCAmelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : int = model_class(_A )
lowerCAmelCase_ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Any = [*signature.parameters.keys()]
lowerCAmelCase_ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase__ ( self ):
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
lowerCAmelCase_ : int = model_class(_A )
lowerCAmelCase_ : int = model(**self._prepare_for_class(_A , _A ) )
lowerCAmelCase_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : int = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ : Optional[int] = layer_type
lowerCAmelCase_ : Tuple = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : Any = True
check_hidden_states_output(_A , _A , _A )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCAmelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _lowerCAmelCase ( ) -> Optional[int]:
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(images=_A , return_tensors="""tf""" )
# forward pass
lowerCAmelCase_ : int = model(**_A )
# verify the logits
lowerCAmelCase_ : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
lowerCAmelCase_ : Any = tf.constant([-11.1_069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4 ) )
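# Standalone inference sketch mirroring the integration test above; the
# checkpoint name is an assumption (the archive list's first entry is
# expected to be "microsoft/resnet-50").
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
logits = model(**processor(images=image, return_tensors="tf")).logits
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])  # e.g. "tiger cat"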
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase__ ( __A ):
__UpperCamelCase = """fnet"""
def __init__( self , _lowercase=32_000 , _lowercase=768 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu_new" , _lowercase=0.1 , _lowercase=512 , _lowercase=4 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=False , _lowercase=512 , _lowercase=3 , _lowercase=1 , _lowercase=2 , **_lowercase , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : List[str] = max_position_embeddings
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : int = type_vocab_size
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = use_tpu_fourier_optimizations
lowerCAmelCase_ : Union[str, Any] = tpu_short_seq_length
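# Usage sketch: the class above appears to mirror transformers.FNetConfig
# (model_type "fnet"), so the released public API round-trips the same
# fields to JSON.
from transformers import FNetConfig
cfg = FNetConfig(hidden_size=768, num_hidden_layers=12)
print(cfg.model_type)             # "fnet"
print(cfg.to_json_string()[:80])  # serialized config, as stored on the Hub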
| 440 | 0 |