code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1)
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case: dict[str, TrieNode] = {} # Mapping from char to TrieNode
snake_case: Union[str, Any] = False
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for word in words:
self.insert(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = self
for char in word:
if char not in curr.nodes:
snake_case: List[Any] = TrieNode()
snake_case: Union[str, Any] = curr.nodes[char]
snake_case: Optional[Any] = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self
for char in word:
if char not in curr.nodes:
return False
snake_case: Tuple = curr.nodes[char]
return curr.is_leaf
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _delete(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
if index == len(SCREAMING_SNAKE_CASE__ ):
# If word does not exist
if not curr.is_leaf:
return False
snake_case: int = False
return len(curr.nodes ) == 0
snake_case: Any = word[index]
snake_case: List[Any] = curr.nodes.get(SCREAMING_SNAKE_CASE__ )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
snake_case: str = _delete(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , SCREAMING_SNAKE_CASE__ , 0 )
def lowerCAmelCase_ ( __A : TrieNode , __A : str ):
'''simple docstring'''
if node.is_leaf:
print(__A , end=' ' )
for key, value in node.nodes.items():
print_words(__A , word + key )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Optional[Any] = 'banana bananas bandana band apple all beast'.split()
snake_case: Optional[Any] = TrieNode()
root.insert_many(__A )
# print_words(root, "")
assert all(root.find(__A ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowerCAmelCase_ ( __A : str , __A : bool ):
'''simple docstring'''
print(str(__A ) , 'works!' if passes else 'doesn\'t work :(' )
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert test_trie()
def lowerCAmelCase_ ( ):
'''simple docstring'''
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()

| code_codestyle: 703 |
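The sample above implements a prefix trie with `insert`, `insert_many`, `find` and `delete`, exercised by the self-test at the bottom. Because its identifiers are style-obfuscated, here is a readable sketch of the same data structure; the method names follow the calls made in the sample's test, everything else (class layout, naming) is an assumption:

```python
from __future__ import annotations


class TrieNode:
    """Prefix-tree node: children keyed by character plus an end-of-word flag."""

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # mapping from char to child node
        self.is_leaf = False                  # True if a word ends at this node

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                if not curr.is_leaf:
                    return False          # word was never inserted
                curr.is_leaf = False
                return len(curr.nodes) == 0   # prune this node if it has no children
            char_node = curr.nodes.get(word[index])
            if char_node is None:
                return False
            if _delete(char_node, word, index + 1):
                del curr.nodes[word[index]]
                return len(curr.nodes) == 0
            return False

        _delete(self, word, 0)


if __name__ == "__main__":
    root = TrieNode()
    root.insert_many("banana bananas bandana band apple all beast".split())
    assert root.find("banana") and not root.find("bandanas")
    root.delete("banana")
    assert not root.find("banana") and root.find("bananas")
```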
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = only_cross_attention
snake_case: Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
snake_case: Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case: List[str] = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case: str = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
snake_case: Tuple = (
AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none
else:
snake_case: int = None
snake_case: Tuple = None
# 3. Feed-forward
snake_case: Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ )
# let chunk size default to None
snake_case: Any = None
snake_case: Any = 0
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = chunk_size
snake_case: str = dim
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
snake_case: Optional[int] = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case , snake_case , snake_case , snake_case , snake_case: int = self.norma(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype )
else:
snake_case: List[str] = self.norma(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case: List[str] = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.use_ada_layer_norm_zero:
snake_case: Tuple = gate_msa.unsqueeze(1 ) * attn_output
snake_case: List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case: Dict = (
self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: List[str] = attn_output + hidden_states
# 3. Feed-forward
snake_case: str = self.norma(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
snake_case: List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case: Optional[Any] = torch.cat(
[self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case: int = self.ff(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case: Tuple = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: int = int(dim * mult )
snake_case: Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case: int = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if activation_fn == "gelu-approximate":
snake_case: Optional[Any] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate='tanh' )
elif activation_fn == "geglu":
snake_case: List[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif activation_fn == "geglu-approximate":
snake_case: Optional[int] = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.ModuleList([] )
# project in
self.net.append(SCREAMING_SNAKE_CASE__ )
# project dropout
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
# project out
self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for module in self.net:
snake_case: Optional[int] = module(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ):
'''simple docstring'''
super().__init__()
snake_case: Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = approximate
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.proj(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.gelu(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case , snake_case: int = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = self.proj(SCREAMING_SNAKE_CASE__ )
return x * torch.sigmoid(1.7_02 * x )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Optional[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = nn.SiLU()
snake_case: Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 )
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case: Dict = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 )
snake_case: str = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.SiLU()
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
snake_case: int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case: str = emb.chunk(6 , dim=1 )
snake_case: Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ):
'''simple docstring'''
super().__init__()
snake_case: str = num_groups
snake_case: str = eps
if act_fn is None:
snake_case: Dict = None
else:
snake_case: List[str] = get_activation(SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.act:
snake_case: Optional[Any] = self.act(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.linear(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = emb[:, :, None, None]
snake_case , snake_case: List[Any] = emb.chunk(2 , dim=1 )
snake_case: Any = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps )
snake_case: Optional[int] = x * (1 + scale) + shift
return x

| style_context_codestyle: 692 | label: 0 |
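The transformer block above optionally applies its feed-forward network in chunks along one dimension (`set_chunk_feed_forward`) to cap peak memory. A minimal sketch of that chunking idea; the `ChunkedFeedForward` stand-in and shapes are illustrative assumptions, not the diffusers API:

```python
import torch
from torch import nn


class ChunkedFeedForward(nn.Module):
    """Feed-forward block that can be applied over chunks of one dimension to cap peak memory."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim, dim * mult), nn.GELU(), nn.Linear(dim * mult, dim))
        self.chunk_size = None   # default: no chunking
        self.chunk_dim = 1       # sequence dimension for (batch, seq, dim) inputs

    def set_chunk_feed_forward(self, chunk_size, dim=1):
        self.chunk_size = chunk_size
        self.chunk_dim = dim

    def forward(self, hidden_states):
        if self.chunk_size is None:
            return self.net(hidden_states)
        length = hidden_states.shape[self.chunk_dim]
        if length % self.chunk_size != 0:
            raise ValueError(f"dimension {length} must be divisible by chunk size {self.chunk_size}")
        chunks = hidden_states.chunk(length // self.chunk_size, dim=self.chunk_dim)
        return torch.cat([self.net(c) for c in chunks], dim=self.chunk_dim)


ff = ChunkedFeedForward(dim=64)
ff.set_chunk_feed_forward(chunk_size=8)   # process 8 tokens at a time
out = ff(torch.randn(2, 32, 64))          # runs as 4 chunks along the sequence dimension
assert out.shape == (2, 32, 64)
```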
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger()
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = field(default_factory=snake_case )
__UpperCamelCase = field(default_factory=snake_case )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(SCREAMING_SNAKE_CASE__ )
def __call__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(SCREAMING_SNAKE_CASE__ )
[x.remove() for x in self.handles]
return self
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return list(filter(lambda SCREAMING_SNAKE_CASE__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 1
__UpperCamelCase = field(default_factory=snake_case )
__UpperCamelCase = field(default_factory=snake_case )
__UpperCamelCase = True
def __call__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = Tracker(self.dest )(SCREAMING_SNAKE_CASE__ ).parametrized
snake_case: List[str] = Tracker(self.src )(SCREAMING_SNAKE_CASE__ ).parametrized
snake_case: Tuple = list(filter(lambda SCREAMING_SNAKE_CASE__ : type(SCREAMING_SNAKE_CASE__ ) not in self.src_skip , SCREAMING_SNAKE_CASE__ ) )
snake_case: Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE__ : type(SCREAMING_SNAKE_CASE__ ) not in self.dest_skip , SCREAMING_SNAKE_CASE__ ) )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ) and self.raise_if_mismatch:
raise Exception(
F"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE__ )} operations while"""
F""" destination module has {len(SCREAMING_SNAKE_CASE__ )}.""" )
for dest_m, src_m in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transferred from={src_m} to={dest_m}""" )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F"""Unexpected layer name {k}"""
snake_case: Optional[int] = len(SCREAMING_SNAKE_CASE__ ) + 1
feature_blocks.append((F"""res{block_index}""", v) )
snake_case: Optional[int] = nn.ModuleDict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return get_trunk_forward_outputs(
SCREAMING_SNAKE_CASE__ , out_feat_keys=SCREAMING_SNAKE_CASE__ , feature_blocks=self._feature_blocks , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if x not in self:
snake_case: str = self.convert_name_to_timm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = partial(lambda: (timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ).eval(), None) )
else:
snake_case: str = super().__getitem__(SCREAMING_SNAKE_CASE__ )
return val
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __getitem__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
snake_case: List[Any] = RegNetModel
else:
snake_case: str = RegNetForImageClassification
return val
def lowerCAmelCase_ ( __A : List[str] , __A : Tuple , __A : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
snake_case: Tuple = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def lowerCAmelCase_ ( __A : str , __A : Callable[[], nn.Module] , __A : Callable[[], nn.Module] , __A : RegNetConfig , __A : Path , __A : bool = True , ):
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
snake_case: Optional[int] = from_model_func()
snake_case: str = our_model_func(__A ).eval()
snake_case: Dict = ModuleTransfer(src=__A , dest=__A , raise_if_mismatch=__A )
snake_case: Tuple = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(__A )
if from_state_dict is not None:
snake_case: str = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
snake_case: Optional[int] = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
snake_case: int = manually_copy_vissl_head(__A , our_model.state_dict() , __A )
our_model.load_state_dict(__A )
snake_case: Tuple = our_model(__A , output_hidden_states=__A )
snake_case: int = (
our_outputs.logits if isinstance(__A , __A ) else our_outputs.last_hidden_state
)
snake_case: Any = from_model(__A )
snake_case: Dict = from_output[-1] if type(__A ) is list else from_output
# the vissl seer model doesn't have a head here (no config files are used), so just check the last hidden state
if "seer" in name and "in1k" in name:
snake_case: Optional[int] = our_outputs.hidden_states[-1]
assert torch.allclose(__A , __A ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=__A , )
snake_case: Union[str, Any] = 2_24 if 'seer' not in name else 3_84
# we can use the convnext one
snake_case: Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=__A )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=__A , )
print(f"""Pushed {name}""" )
def lowerCAmelCase_ ( __A : Path , __A : str = None , __A : bool = True ):
'''simple docstring'''
snake_case: List[str] = 'imagenet-1k-id2label.json'
snake_case: Tuple = 10_00
snake_case: Any = (1, num_labels)
snake_case: Optional[Any] = 'huggingface/label-files'
snake_case: int = num_labels
snake_case: Optional[Any] = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='dataset' ) ) , 'r' ) )
snake_case: Any = {int(__A ): v for k, v in idalabel.items()}
snake_case: int = idalabel
snake_case: str = {v: k for k, v in idalabel.items()}
snake_case: Any = partial(__A , num_labels=__A , idalabel=__A , labelaid=__A )
snake_case: List[Any] = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
snake_case: List[Any] = NameToOurModelFuncMap()
snake_case: Union[str, Any] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__A : str , __A : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
snake_case: Dict = torch.hub.load_state_dict_from_url(__A , model_dir=str(__A ) , map_location='cpu' )
snake_case: int = model_func()
# check if we have a head, if yes add it
snake_case: Tuple = files['classy_state_dict']['base_model']['model']
snake_case: str = model_state_dict['trunk']
model.load_state_dict(__A )
return model.eval(), model_state_dict["heads"]
# pretrained
snake_case: Any = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case: int = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case: int = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case: Union[str, Any] = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
snake_case: Any = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case: Tuple = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case: Tuple = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case: Optional[int] = partial(
__A , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __A , __A , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __A , __A , __A , )
return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| code_codestyle: 704 |
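The conversion script's `Tracker`/`ModuleTransfer` pair copies weights between two architecturally equivalent networks by hooking every module, recording parametrized leaf modules in forward order, and zipping their state dicts. A minimal sketch of that mechanism under simplified assumptions; the tiny CNNs and function names are illustrative, not the script's classes:

```python
import torch
from torch import nn


def trace_leaf_modules(model, x):
    """Run one forward pass and record parametrized leaf modules in execution order."""
    traced, handles = [], []

    def hook(module, _inputs, _output):
        is_leaf = len(list(module.children())) == 0
        if is_leaf and len(module.state_dict()) > 0:
            traced.append(module)

    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)
    for h in handles:
        h.remove()
    return traced


def transfer_weights(src, dest, x):
    """Copy weights leaf-by-leaf; assumes both models run the same ops in the same order."""
    src_leaves, dest_leaves = trace_leaf_modules(src, x), trace_leaf_modules(dest, x)
    if len(src_leaves) != len(dest_leaves):
        raise RuntimeError(f"operation count mismatch: {len(src_leaves)} vs {len(dest_leaves)}")
    for s, d in zip(src_leaves, dest_leaves):
        d.load_state_dict(s.state_dict())


# Two independently initialized but architecturally identical networks.
src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 3))
dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 3))
with torch.no_grad():
    transfer_weights(src, dest, torch.randn(1, 3, 32, 32))
    src.eval(), dest.eval()
    probe = torch.ones(1, 3, 32, 32)
    assert torch.allclose(src(probe), dest(probe))
```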
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = RoCBertTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = filter_non_english
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
snake_case: List[Any] = {}
snake_case: List[str] = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = i
snake_case: Union[str, Any] = i
snake_case: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: Dict = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
snake_case: Union[str, Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: str = i
snake_case: Optional[int] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
snake_case: int = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _UpperCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case: List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
snake_case: Optional[int] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
snake_case: int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ['的', '人', '有']
snake_case: Any = ''.join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = True
snake_case: List[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = False
snake_case: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: int = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case: Union[str, Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: int = tokenizer.encode('你好' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Any = tokenizer.encode('你是谁' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Dict = '你好,你是谁'
snake_case: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

| style_context_codestyle: 692 | label: 0 |
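The wordpiece assertions in this test ('unwanted running' tokenizing to ['un', '##want', '##ed', 'runn', '##ing']) follow from greedy longest-match-first subword splitting with a `##` continuation prefix. A minimal sketch of that algorithm, independent of the RoCBert classes:

```python
def wordpiece_tokenize(text, vocab, unk_token="[UNK]"):
    """Greedy longest-match-first WordPiece; continuation pieces carry a '##' prefix."""
    output = []
    for word in text.split():
        start, pieces = 0, []
        while start < len(word):
            end, match = len(word), None
            while start < end:
                piece = word[start:end] if start == 0 else "##" + word[start:end]
                if piece in vocab:
                    match = piece
                    break
                end -= 1
            if match is None:          # no piece matched: the whole word becomes [UNK]
                pieces = [unk_token]
                break
            pieces.append(match)
            start = end
        output.extend(pieces)
    return output


vocab = {"[UNK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"}
assert wordpiece_tokenize("unwanted running", vocab) == ["un", "##want", "##ed", "runn", "##ing"]
assert wordpiece_tokenize("unwantedX running", vocab) == ["[UNK]", "runn", "##ing"]
```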
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase_ ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case: Any = cst_fwd.get(__A , np.inf )
snake_case: int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case: Union[str, Any] = new_cost_f
snake_case: Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case: List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[Any] = -1
snake_case: Any = set()
snake_case: str = set()
snake_case: int = {source: 0}
snake_case: Dict = {destination: 0}
snake_case: int = {source: None}
snake_case: Union[str, Any] = {destination: None}
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case: List[str] = queue_forward.get()
visited_forward.add(__A )
snake_case: int = queue_backward.get()
visited_backward.add(__A )
snake_case: str = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
snake_case: Optional[Any] = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case: Any = shortest_distance
return shortest_path_distance
__UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
__UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()

| code_codestyle: 705 |
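The two dictionaries at the bottom of the sample are the forward and reverse adjacency lists consumed by the bidirectional search. As a self-contained cross-check (an addition, not part of the sample), a plain single-direction Dijkstra over the forward graph gives the distance the bidirectional search should return for E to F, namely 3:

```python
import heapq


def dijkstra(graph, source, destination):
    """Plain one-directional Dijkstra over an adjacency list of [neighbor, weight] pairs."""
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        if d > dist.get(v, float("inf")):
            continue  # stale queue entry
        for nxt, weight in graph.get(v, []):
            new_dist = d + weight
            if new_dist < dist.get(nxt, float("inf")):
                dist[nxt] = new_dist
                heapq.heappush(heap, (new_dist, nxt))
    return float("inf")


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
assert dijkstra(graph_fwd, "E", "F") == 3  # the bidirectional version should agree
```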
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__UpperCAmelCase = 6378137.0
__UpperCAmelCase = 6356752.314245
__UpperCAmelCase = 6_378_137
def lowerCAmelCase_ ( __A : float , __A : float , __A : float , __A : float ):
'''simple docstring'''
snake_case: Optional[Any] = (AXIS_A - AXIS_B) / AXIS_A
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: Tuple = radians(__A )
snake_case: Tuple = radians(__A )
# Equation
snake_case: List[Any] = sin((phi_a - phi_a) / 2 )
snake_case: Dict = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
snake_case: Union[str, Any] = sqrt(sin_sq_phi + (cos(__A ) * cos(__A ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()

| style_context_codestyle: 692 | label: 0 |
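This sample evaluates the haversine formula on reduced (parametric) latitudes to partially account for the Earth's flattening. Written out, the relation the code computes is

$$
d \;=\; 2R\,\arcsin\!\left(\sqrt{\sin^{2}\!\frac{\phi_{2}-\phi_{1}}{2}\;+\;\cos\phi_{1}\,\cos\phi_{2}\,\sin^{2}\!\frac{\lambda_{2}-\lambda_{1}}{2}}\right),
\qquad
\phi_{i} = \arctan\!\bigl((1-f)\tan\varphi_{i}\bigr),
\quad
f = \frac{a-b}{a}
$$

where the geodetic latitudes and longitudes $\varphi_i$, $\lambda_i$ are taken in radians, $a$ and $b$ are the equatorial and polar radii (`AXIS_A`, `AXIS_B`), and $R$ is the sample's `RADIUS` constant of 6 378 137 m.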
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase = os.path.join(git_repo_path, "src", "transformers")
__UpperCAmelCase = "\n{0} = None\n"
__UpperCAmelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__UpperCAmelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tokenizers' )
snake_case: List[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tensorflow_text' )
snake_case: int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers' )
snake_case: Optional[Any] = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tensorflow_text' )
snake_case: Dict = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers_and_vision' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE__ )
self.assertIn('tensorflow_text' , SCREAMING_SNAKE_CASE__ )
self.assertIn('sentencepiece_and_tokenizers' , SCREAMING_SNAKE_CASE__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '\nCONSTANT = None\n' )
snake_case: Any = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case: Optional[int] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case: Tuple = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case: Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE__ )

| code_codestyle: 706 |
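The dummy templates this test checks expand into placeholder objects that raise as soon as they are touched, pointing the user at the missing backend. A minimal sketch of how such a generated placeholder behaves; these are simplified stand-ins for `DummyObject` and `requires_backends`, not the transformers implementation:

```python
class DummyObject(type):
    """Metaclass for placeholders of classes whose optional backend is not installed."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {backends}")


# What a generated dummy looks like for a torch-only class:
class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


try:
    FakeClass()
except ImportError as err:
    print(err)  # FakeClass requires the following backends: ['torch']
```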
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

| style_context_codestyle: 692 | label: 0 |
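The module above collects its public symbols into `_import_structure` and hands them to a lazy module object, so the heavy torch/TF/Flax imports only happen when a symbol is actually accessed. A minimal sketch of the same deferred-import idea using PEP 562's module-level `__getattr__`; this is a hypothetical package layout, not the `_LazyModule` implementation:

```python
# __init__.py of a hypothetical package that defers its submodule imports.
import importlib

_import_structure = {
    "configuration_roformer": ["RoFormerConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
# Reverse map: public name -> submodule that defines it.
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):  # PEP 562: called only when `name` is not already defined on the module
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        value = getattr(module, name)
        globals()[name] = value  # cache so the submodule is imported only once
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return sorted(list(globals()) + list(_name_to_module))
```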
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.dummy_uncond_unet
snake_case: Optional[Any] = DDIMScheduler()
snake_case: Union[str, Any] = self.dummy_vq_model
snake_case: Union[str, Any] = LDMPipeline(unet=SCREAMING_SNAKE_CASE__ , vqvae=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ldm.to(SCREAMING_SNAKE_CASE__ )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = torch.manual_seed(0 )
snake_case: Dict = ldm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='numpy' ).images
snake_case: Union[str, Any] = torch.manual_seed(0 )
snake_case: List[Any] = ldm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='numpy' , return_dict=SCREAMING_SNAKE_CASE__ )[0]
snake_case: Union[str, Any] = image[0, -3:, -3:, -1]
snake_case: Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case: Optional[int] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
snake_case: Union[str, Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(SCREAMING_SNAKE_CASE__ )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: int = torch.manual_seed(0 )
snake_case: Union[str, Any] = ldm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , output_type='numpy' ).images
snake_case: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case: Optional[int] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
snake_case: List[Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 707 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Tuple = model.config
snake_case: str = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
snake_case: Optional[Any] = MBartConfig(
is_decoder=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__A , add_final_layer_norm=__A , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "encoder.model" in name:
snake_case: Optional[Any] = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
snake_case: str = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
snake_case: Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
snake_case: Tuple = 'encoder.' + name
if "attn.proj" in name:
snake_case: Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
snake_case: Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case: Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
snake_case: Dict = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
snake_case: int = 'encoder.layernorm.bias'
return name
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case: List[Any] = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case: Union[str, Any] = key.split('.' )
snake_case: Optional[Any] = int(key_split[3] )
snake_case: Any = int(key_split[5] )
snake_case: Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case: Union[str, Any] = val[:dim, :]
snake_case: Any = val[dim : dim * 2, :]
snake_case: List[str] = val[-dim:, :]
else:
snake_case: str = val[:dim]
snake_case: Union[str, Any] = val[dim : dim * 2]
snake_case: List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
snake_case: Optional[int] = val
return orig_state_dict
def lowerCAmelCase_ ( __A : List[Any] , __A : Any=None , __A : List[str]=False ):
'''simple docstring'''
snake_case: str = DonutModel.from_pretrained(__A ).eval()
# load HuggingFace model
snake_case , snake_case: Optional[Any] = get_configs(__A )
snake_case: Optional[int] = DonutSwinModel(__A )
snake_case: Tuple = MBartForCausalLM(__A )
snake_case: Optional[Any] = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
snake_case: Optional[int] = original_model.state_dict()
snake_case: Optional[int] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# verify results on scanned document
snake_case: Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
snake_case: str = dataset['test'][0]['image'].convert('RGB' )
snake_case: Optional[int] = XLMRobertaTokenizerFast.from_pretrained(__A , from_slow=__A )
snake_case: Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
snake_case: Dict = DonutProcessor(__A , __A )
snake_case: Optional[Any] = processor(__A , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
snake_case: int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
snake_case: Optional[Any] = 'When is the coffee break?'
snake_case: Optional[int] = task_prompt.replace('{user_input}' , __A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
snake_case: Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
snake_case: str = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
snake_case: str = 's_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
snake_case: int = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
snake_case: Optional[Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
snake_case: Optional[int] = original_model.decoder.tokenizer(__A , add_special_tokens=__A , return_tensors='pt' )[
'input_ids'
]
snake_case: Any = original_model.encoder.model.patch_embed(__A )
snake_case , snake_case: Dict = model.encoder.embeddings(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
# verify encoder hidden states
snake_case: Tuple = original_model.encoder(__A )
snake_case: List[str] = model.encoder(__A ).last_hidden_state
assert torch.allclose(__A , __A , atol=1E-2 )
# verify decoder hidden states
snake_case: List[Any] = original_model(__A , __A , __A ).logits
snake_case: List[Any] = model(__A , decoder_input_ids=__A ).logits
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__UpperCAmelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 692 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1)
__UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Node | None = None
for i in sorted(SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = Node(SCREAMING_SNAKE_CASE__ , self.head )
def __iter__( self ):
'''simple docstring'''
snake_case: str = self.head
while node:
yield node.data
snake_case: Dict = node.next_node
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self ):
'''simple docstring'''
return " -> ".join([str(SCREAMING_SNAKE_CASE__ ) for node in self] )
def lowerCAmelCase_ ( __A : SortedLinkedList , __A : SortedLinkedList ):
'''simple docstring'''
return SortedLinkedList(list(__A ) + list(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 708 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
snake_case: Union[str, Any] = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = np.random.randn(3 , 4 )
snake_case: Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case: Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Dict = np.random.randn(3 , 4 , 5 )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Optional[int] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
snake_case: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: List[str] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
snake_case: List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: int = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = np.random.randn(1 , 3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case: List[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Tuple = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Any = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = np.random.randn(3 , 4 )
snake_case: int = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) ) | 692 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["PerceiverFeatureExtractor"]
__UpperCAmelCase = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 709 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "PoolFormerConfig"
# Base docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = [1, 512, 7, 7]
# Image classification docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = "tabby, tabby cat"
__UpperCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( __A : Tuple , __A : float = 0.0 , __A : bool = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
snake_case: Union[str, Any] = 1 - drop_prob
snake_case: List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case: List[Any] = keep_prob + torch.rand(__A , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case: Any = input.div(__A ) * random_tensor
return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = drop_prob
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training )
def _UpperCamelCase ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: List[str] = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride)
snake_case: Union[str, Any] = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding)
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.projection(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.norm(SCREAMING_SNAKE_CASE__ )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: str = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = ACTaFN[config.hidden_act]
else:
snake_case: int = config.hidden_act
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.act_fn(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.drop(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.drop(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = PoolFormerPooling(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
# Useful for training neural nets
snake_case: Union[str, Any] = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity()
snake_case: Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
snake_case: Any = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.use_layer_scale:
snake_case: str = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: str = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = ()
snake_case: Dict = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: Any = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = (output,) + outputs
return outputs
else:
snake_case: Optional[Any] = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) )
# First residual connection
snake_case: Union[str, Any] = pooling_output + hidden_states
snake_case: List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: Dict = hidden_states + layer_output
snake_case: Optional[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = config
# stochastic depth decay rule
snake_case: List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: List[Any] = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
# Transformer blocks
snake_case: str = []
snake_case: int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
snake_case: str = () if output_hidden_states else None
snake_case: Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: Dict = layers
# Get patch embeddings from hidden_states
snake_case: int = embedding_layer(SCREAMING_SNAKE_CASE__ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = blk(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = layer_outputs[0]
if output_hidden_states:
snake_case: List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = "poolformer"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = config
snake_case: Tuple = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: List[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Any = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.dense(SCREAMING_SNAKE_CASE__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = config.num_labels
snake_case: str = PoolFormerModel(SCREAMING_SNAKE_CASE__ )
# Final norm
snake_case: int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case: Optional[Any] = self.poolformer(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: Any = outputs[0]
snake_case: str = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) )
snake_case: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict = 'single_label_classification'
else:
snake_case: List[str] = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Union[str, Any] = MSELoss()
if self.num_labels == 1:
snake_case: List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: int = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
snake_case: Union[str, Any] = CrossEntropyLoss()
snake_case: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int = BCEWithLogitsLoss()
snake_case: Optional[int] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
snake_case: str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states ) | 692 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
snake_case: Union[str, Any] = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = np.random.randn(3 , 4 )
snake_case: Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case: Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Dict = np.random.randn(3 , 4 , 5 )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Optional[int] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
snake_case: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: List[str] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
snake_case: List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: int = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = np.random.randn(1 , 3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case: List[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Tuple = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Any = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = np.random.randn(3 , 4 )
snake_case: int = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) ) | 710 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase_ ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case: Any = cst_fwd.get(__A , np.inf )
snake_case: int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case: Union[str, Any] = new_cost_f
snake_case: Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case: List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[Any] = -1
snake_case: Any = set()
snake_case: str = set()
snake_case: int = {source: 0}
snake_case: Dict = {destination: 0}
snake_case: int = {source: None}
snake_case: Union[str, Any] = {destination: None}
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case , snake_case: List[str] = queue_forward.get()
visited_forward.add(__A )
snake_case , snake_case: int = queue_backward.get()
visited_backward.add(__A )
snake_case: str = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
snake_case: Optional[Any] = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case: Any = shortest_distance
return shortest_path_distance
__UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
__UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
import os
def lowerCAmelCase_ ( __A : str = "matrix.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(__A ) , __A ) ) as in_file:
snake_case: Tuple = in_file.read()
snake_case: List[str] = [[int(__A ) for cell in row.split(',' )] for row in data.strip().splitlines()]
snake_case: Optional[int] = [[0 for cell in row] for row in grid]
snake_case: List[str] = len(grid[0] )
snake_case: Any = [[0 for i in range(__A )] for j in range(__A )]
snake_case: str = grid[0][0]
for i in range(1 , __A ):
snake_case: Dict = grid[0][i] + dp[0][i - 1]
for i in range(1 , __A ):
snake_case: Union[str, Any] = grid[i][0] + dp[i - 1][0]
for i in range(1 , __A ):
for j in range(1 , __A ):
snake_case: Any = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }') | 711 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,) | 692 | 0 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
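    # Helper meant to be mixed into concrete IF pipeline test classes: it provides tiny dummy
    # components and a save/load check that runs the pipeline from pre-computed prompt
    # embeddings with its optional components removed.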
def _UpperCamelCase ( self ):
'''simple docstring'''
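        # Tiny randomly initialised components so the test stays fast: a small T5 text
        # encoder/tokenizer, a 32px UNet, a DDPM scheduler and a watermarker, all seeded for determinism.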
torch.manual_seed(0 )
snake_case: Any = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
snake_case: List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
snake_case: int = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] ,
            down_block_types=['ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'] ,
            mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,
            in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 ,
            addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' ,
            resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
snake_case: List[str] = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 ,
            thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 ,
            prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
snake_case: Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _UpperCamelCase ( self ):
'''simple docstring'''
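        # Same idea as above, plus the extra pieces the super-resolution-style IF pipelines expect:
        # a 6-channel UNet input and a separate image-noising scheduler.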
torch.manual_seed(0 )
snake_case: int = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
snake_case: Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
snake_case: Optional[int] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
snake_case: str = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
snake_case: int = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
snake_case: Optional[int] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
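    # Assumption: the components above target the IF super-resolution stage — the UNet takes 6 input
    # channels and an additional `image_noising_scheduler` is returned for noising the low-resolution
    # conditioning image.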
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_dummy_components()
snake_case: str = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: str = inputs['prompt']
snake_case: str = inputs['generator']
snake_case: Optional[int] = inputs['num_inference_steps']
snake_case: Any = inputs['output_type']
if "image" in inputs:
snake_case: List[str] = inputs['image']
else:
snake_case: Optional[Any] = None
if "mask_image" in inputs:
snake_case: int = inputs['mask_image']
else:
snake_case: str = None
if "original_image" in inputs:
snake_case: str = inputs['original_image']
else:
snake_case: Optional[int] = None
snake_case: List[str] = pipe.encode_prompt(SCREAMING_SNAKE_CASE__ )
# inputs with prompt converted to embeddings
snake_case: Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
snake_case: Union[str, Any] = image
if mask_image is not None:
snake_case: Union[str, Any] = mask_image
if original_image is not None:
snake_case: Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = pipe(**SCREAMING_SNAKE_CASE__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipe_loaded.to(SCREAMING_SNAKE_CASE__ )
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
snake_case: Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = inputs['generator']
snake_case: Dict = inputs['num_inference_steps']
snake_case: Any = inputs['output_type']
# inputs with prompt converted to embeddings
snake_case: Dict = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
snake_case: int = image
if mask_image is not None:
snake_case: Optional[Any] = mask_image
if original_image is not None:
snake_case: List[str] = original_image
snake_case: List[Any] = pipe_loaded(**SCREAMING_SNAKE_CASE__ )[0]
snake_case: Any = np.abs(to_np(SCREAMING_SNAKE_CASE__ ) - to_np(SCREAMING_SNAKE_CASE__ ) ).max()
self.assertLess(SCREAMING_SNAKE_CASE__ , 1E-4 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_dummy_components()
snake_case: int = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = pipe(**SCREAMING_SNAKE_CASE__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipe_loaded.to(SCREAMING_SNAKE_CASE__ )
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
snake_case: Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = pipe_loaded(**SCREAMING_SNAKE_CASE__ )[0]
snake_case: str = np.abs(to_np(SCREAMING_SNAKE_CASE__ ) - to_np(SCREAMING_SNAKE_CASE__ ) ).max()
self.assertLess(SCREAMING_SNAKE_CASE__ , 1E-4 ) | 712 |
'''Tests for data_structures.hashing.hash_map.HashMap, replaying operation sequences against a plain dict as the reference implementation.'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
return getitem, k
def lowerCAmelCase_ ( __A : Any , __A : Optional[int] ):
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
return delitem, k
def lowerCAmelCase_ ( __A : str , __A : int , *__A : Tuple ):
    '''Apply the operation to the given container, returning (result, None) on success or (None, exception) on failure.'''
try:
return fun(__A , *__A ), None
except Exception as e:
return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
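# The scenarios above are replayed against both HashMap and a built-in dict (see the parametrized
# test below); each entry is an (operation, *args) tuple produced by the _get/_set/_del helpers.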
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: List[Any] = HashMap(initial_block_size=4 )
snake_case: List[Any] = {}
for _, (fun, *args) in enumerate(__A ):
snake_case , snake_case: Optional[int] = _run_operation(__A , __A , *__A )
snake_case , snake_case: str = _run_operation(__A , __A , *__A )
assert my_res == py_res
assert str(__A ) == str(__A )
assert set(__A ) == set(__A )
assert len(__A ) == len(__A )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
'''simple docstring'''
def is_public(__A : str ) -> bool:
return not name.startswith('_' )
snake_case: Dict = {name for name in dir({} ) if is_public(__A )}
snake_case: List[str] = {name for name in dir(HashMap() ) if is_public(__A )}
assert dict_public_names > hash_public_names | 692 | 0 |
'''Tests for DonutImageProcessor: attribute/config checks and output shapes for PIL, NumPy and PyTorch image inputs.'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=18 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=4_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
snake_case: Optional[int] = parent
snake_case: Union[str, Any] = batch_size
snake_case: List[str] = num_channels
snake_case: str = image_size
snake_case: Union[str, Any] = min_resolution
snake_case: Dict = max_resolution
snake_case: List[Any] = do_resize
snake_case: Dict = size if size is not None else {'height': 18, 'width': 20}
snake_case: List[str] = do_thumbnail
snake_case: Optional[int] = do_align_axis
snake_case: Union[str, Any] = do_pad
snake_case: Dict = do_normalize
snake_case: Tuple = image_mean
snake_case: str = image_std
def _UpperCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = DonutImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = DonutImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_thumbnail' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_pad' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
snake_case: Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
snake_case: Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@is_flaky()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
snake_case: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
snake_case: str = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
snake_case: Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
snake_case: Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
snake_case: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
snake_case: Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , ) | 713 |
'''Convert a fairseq wav2vec2 encoder + decoder checkpoint into a Hugging Face SpeechEncoderDecoderModel (wav2vec2 encoder, Speech2Text2 decoder).'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
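# The mapping above appears to translate fairseq wav2vec2 parameter paths into their Hugging Face
# counterparts; the "*" placeholder is filled in with the transformer layer index during conversion,
# and the last few names are loaded at the top level of the converted model.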
def lowerCAmelCase_ ( __A : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ):
    '''Copy `value` into the attribute of the HF module addressed by the dotted `key`, checking that the shapes match.'''
for attribute in key.split('.' ):
snake_case: List[str] = getattr(__A , __A )
if weight_type is not None:
snake_case: Optional[int] = getattr(__A , __A ).shape
else:
snake_case: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case: Optional[int] = value
elif weight_type == "weight_g":
snake_case: List[str] = value
elif weight_type == "weight_v":
snake_case: Dict = value
elif weight_type == "bias":
snake_case: Optional[Any] = value
else:
snake_case: int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A : List[Any] , __A : List[str] ):
    '''Walk the fairseq state dict, rename each parameter and copy it into the HF model; returns the encoder-to-decoder projection weight when one is found.'''
snake_case: List[Any] = []
snake_case: List[Any] = fairseq_model.state_dict()
snake_case: Union[str, Any] = hf_model.feature_extractor
    # if the encoder hidden size differs from the decoder's, the checkpoint carries a projection weight (proj_weight)
snake_case: Dict = None
for name, value in fairseq_dict.items():
snake_case: Tuple = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
snake_case: List[Any] = True
elif name.split('.' )[0] == "proj":
snake_case: List[Any] = fairseq_model.proj
snake_case: int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case: int = True
if "*" in mapped_key:
snake_case: List[str] = name.split(__A )[0].split('.' )[-2]
snake_case: Dict = mapped_key.replace('*' , __A )
if "weight_g" in name:
snake_case: Tuple = 'weight_g'
elif "weight_v" in name:
snake_case: int = 'weight_v'
elif "bias" in name:
snake_case: Tuple = 'bias'
elif "weight" in name:
snake_case: List[Any] = 'weight'
else:
snake_case: Any = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
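# The helper below appears to copy one fairseq feature-extractor conv layer into the matching
# `feature_extractor.conv_layers` entry: type id 0 targets the convolution's weight/bias, while
# type id 2 targets the layer norm (only for layers that actually carry a norm).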
def lowerCAmelCase_ ( __A : List[str] , __A : List[Any] , __A : int , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: int = full_name.split('conv_layers.' )[-1]
snake_case: Tuple = name.split('.' )
snake_case: Any = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case: Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case: int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case: Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case: str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowerCAmelCase_ ( __A : Dict ):
    '''Create a bias-free linear layer whose weight is taken from the given embedding matrix.'''
snake_case , snake_case: List[Any] = emb.weight.shape
snake_case: Optional[int] = nn.Linear(__A , __A , bias=__A )
snake_case: Any = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[int] ):
    '''Build a token-to-id mapping from a fairseq dictionary file (one "<token> <count>" entry per line), reserving ids 0-3 for the special tokens.'''
with open(__A , 'r' , encoding='utf-8' ) as f:
snake_case: List[Any] = f.readlines()
snake_case: Any = [line.split(' ' )[0] for line in lines]
snake_case: int = len(__A )
snake_case: Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
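# Hypothetical example of the dictionary format consumed above: a file with the lines
# "hello 42" and "world 7" would produce
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.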
@torch.no_grad()
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Any , __A : List[Any] , __A : int , __A : str , ):
    '''Convert the fairseq checkpoint into a SpeechEncoderDecoderModel and save the model, tokenizer and feature extractor to the output folder.'''
snake_case: Union[str, Any] = WavaVecaConfig.from_pretrained(__A )
snake_case: str = SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
snake_case: List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
snake_case , snake_case , snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case: List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case: Optional[Any] = WavaVecaModel(__A )
snake_case: Any = recursively_load_weights_wavaveca(model.encoder , __A )
snake_case: Union[str, Any] = SpeechaTextaForCausalLM(__A )
snake_case , snake_case: Optional[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case: str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case: int = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
snake_case: List[Any] = False
# add projection layer
snake_case: Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case: Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case: List[Any] = create_vocab_dict(__A )
with open(os.path.join(__A , 'vocab.json' ) , 'w' ) as fp:
json.dump(__A , __A )
snake_case: Union[str, Any] = SpeechaTextaTokenizer(os.path.join(__A , 'vocab.json' ) )
tokenizer.save_pretrained(__A )
snake_case: Tuple = hf_wavavec.config.to_dict()
snake_case: int = tokenizer.pad_token_id
snake_case: Dict = tokenizer.bos_token_id
snake_case: Optional[int] = tokenizer.eos_token_id
snake_case: Dict = 'speech_to_text_2'
snake_case: Optional[Any] = 'wav2vec2'
snake_case: Tuple = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 692 | 0 |
'''Fine-tune a TAPEX (BART) model for table fact verification on the TabFact dataset.'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__UpperCamelCase = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the training data."} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
__UpperCamelCase = field(default=snake_case , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
snake_case: str = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
snake_case: Optional[Any] = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCAmelCase_ ( ):
    '''Parse the arguments, build the TabFact datasets and run training, evaluation and/or prediction with the Trainer.'''
snake_case: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case: str = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case: Tuple = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case: List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case: Optional[int] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case: Tuple = data_args.train_file.split('.' )[-1]
snake_case: Union[str, Any] = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case: Union[str, Any] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
snake_case: List[Any] = load_dataset('csv' , data_files=__A , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case: Optional[Any] = load_dataset('json' , data_files=__A , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case: Tuple = raw_datasets['train'].features['label'].names
snake_case: List[str] = len(__A )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case: Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case: List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__A , )
snake_case: Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case: int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case: Union[str, Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case: Optional[Any] = {'Refused': 0, 'Entailed': 1}
snake_case: List[Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case: List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__A : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(__A : Dict ):
snake_case: str = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
snake_case: List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
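        # Hypothetical input for the helper above: "col1#col2\na#b" becomes a one-row DataFrame
        # with columns ["col1", "col2"]; the first '#'-separated row is used as the header.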
snake_case: str = examples['statement']
snake_case: int = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
snake_case: List[Any] = tokenizer(__A , __A , padding=__A , max_length=__A , truncation=__A )
snake_case: List[Any] = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
snake_case: int = raw_datasets.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case: List[str] = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case: Tuple = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case: Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case: Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
snake_case: str = raw_datasets['test']
if data_args.max_predict_samples is not None:
snake_case: List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__A : EvalPrediction ):
snake_case: int = p.predictions[0] if isinstance(p.predictions , __A ) else p.predictions
snake_case: List[str] = np.argmax(__A , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case: str = default_data_collator
elif training_args.fpaa:
snake_case: List[str] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
snake_case: List[Any] = None
# Initialize our Trainer
snake_case: List[str] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
snake_case: Optional[int] = None
if training_args.resume_from_checkpoint is not None:
snake_case: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case: Optional[Any] = last_checkpoint
snake_case: Union[str, Any] = trainer.train(resume_from_checkpoint=__A )
snake_case: List[Any] = train_result.metrics
snake_case: List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
snake_case: Optional[Any] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __A )
trainer.save_metrics('train' , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case: Dict = trainer.evaluate(eval_dataset=__A )
snake_case: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
snake_case: Dict = min(__A , len(__A ) )
trainer.log_metrics('eval' , __A )
trainer.save_metrics('eval' , __A )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
snake_case: Optional[int] = predict_dataset.remove_columns('label' )
snake_case: str = trainer.predict(__A , metric_key_prefix='predict' ).predictions
snake_case: Any = np.argmax(__A , axis=1 )
snake_case: int = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__A ):
snake_case: int = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case: Optional[int] = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 714 |
'''Project Euler problem 6: difference between the square of the sum and the sum of the squares.'''
def lowerCAmelCase_ ( __A : int = 1_00 ):
    '''Return the difference between the square of the sum and the sum of the squares of the first `n` natural numbers.'''
snake_case: List[str] = n * (n + 1) * (2 * n + 1) / 6
snake_case: List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
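# Example: solution(10) returns 3025 - 385 = 2640, and the default solution(100) returns 25164150.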
if __name__ == "__main__":
print(F'{solution() = }') | 692 | 0 |
'''Lazy import structure for the MLuke tokenizer (available only when sentencepiece is installed).'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 715 |
'''Convert a TensorFlow BigBird-Pegasus checkpoint into a Hugging Face BigBirdPegasusForConditionalGeneration model.'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__UpperCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__UpperCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCAmelCase_ ( __A : Dict , __A : List[Any] ):
    '''Apply the (tf_pattern, hf_pattern) substitutions in order to a TF variable name and return the Hugging Face key.'''
for tf_name, hf_name in patterns:
snake_case: List[Any] = k.replace(__A , __A )
return k
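# Hypothetical example: a TF name such as "pegasus/decoder/layer_0/attention/self/query/kernel"
# would, after the DECODER_PATTERNS substitutions above, come out as
# "model.decoder.layers.0.self_attn.q_proj.weight".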
def lowerCAmelCase_ ( __A : dict , __A : dict ):
    '''Initialise a BigBirdPegasusForConditionalGeneration from the config dict and copy the renamed TF weights into it.'''
snake_case: Optional[int] = BigBirdPegasusConfig(**__A )
snake_case: List[Any] = BigBirdPegasusForConditionalGeneration(__A )
snake_case: Any = torch_model.state_dict()
snake_case: Any = {}
# separating decoder weights
snake_case: Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
snake_case: Any = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
snake_case: List[str] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Any = DECODER_PATTERNS
snake_case: int = rename_state_dict_key(__A , __A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: Optional[Any] = v.T
snake_case: Any = torch.from_numpy(__A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
snake_case: List[Any] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Union[str, Any] = REMAINING_PATTERNS
snake_case: str = rename_state_dict_key(__A , __A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: int = v.T
snake_case: Any = torch.from_numpy(__A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case: str = mapping['model.embed_positions.weight']
snake_case: Any = mapping.pop('model.embed_positions.weight' )
snake_case , snake_case: Union[str, Any] = torch_model.load_state_dict(__A , strict=__A )
snake_case: Optional[int] = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase_ ( __A : Optional[int] ):
    '''Read every variable of the TF checkpoint into a {name: numpy array} dict, skipping names on the ignore list.'''
snake_case: Tuple = tf.train.list_variables(__A )
snake_case: str = {}
snake_case: List[str] = ['global_step']
for name, shape in tqdm(__A , desc='converting tf checkpoint to dict' ):
snake_case: str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case: Any = tf.train.load_variable(__A , __A )
snake_case: Optional[int] = array
return tf_weights
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict ):
    '''Load the TF weights, convert them to a BigBirdPegasus model and save the result to save_dir.'''
snake_case: int = get_tf_weights_as_numpy(__A )
snake_case: int = convert_bigbird_pegasus(__A , __A )
torch_model.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 692 | 0 |
'''Lazy import structure for RAG: configuration, retriever, tokenizer and the PyTorch/TensorFlow model classes.'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 716 |
'''Longest path (counted in vertices) in a directed acyclic graph, via a Kahn-style topological traversal.'''
def lowerCAmelCase_ ( __A : List[str] ):
    '''Run a topological traversal over the adjacency list and print the maximum number of vertices on any path.'''
snake_case: str = [0] * len(__A )
snake_case: Tuple = []
snake_case: Tuple = [1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
snake_case: int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case: Any = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph) | 692 | 0 |
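# For the sample DAG above, the longest path visits 5 vertices (for example 0 -> 2 -> 5 -> 6 -> 7),
# so the intended printed result is 5.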
def lowerCAmelCase_ ( __A : int = 1_00 ):
    '''Return the difference between the square of the sum and the sum of the squares of the first `n` natural numbers.'''
snake_case: List[str] = n * (n + 1) * (2 * n + 1) / 6
snake_case: List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }') | 717 |
'''Tests for ChineseCLIPProcessor: save/load round trips and consistency with the underlying tokenizer and image processor.'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = tempfile.mkdtemp()
snake_case: Optional[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case: Optional[int] = {
'do_resize': True,
'size': {'height': 2_24, 'width': 2_24},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: Union[str, Any] = self.get_rust_tokenizer()
snake_case: Union[str, Any] = self.get_image_processor()
snake_case: List[str] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case: List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
snake_case: Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_image_processor()
snake_case: Tuple = self.get_tokenizer()
snake_case: Optional[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.prepare_image_inputs()
snake_case: List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: Optional[int] = self.get_tokenizer()
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Tuple = self.prepare_image_inputs()
snake_case: Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case: int = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = 'Alexandra,T-shirt的价格是15便士。'
snake_case: List[Any] = self.prepare_image_inputs()
snake_case: Dict = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 692 | 0 |
'''Tests for PNDMScheduler: default config handling and step_prk/step_plms consistency across a save/load round trip.'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = (PNDMScheduler,)
__UpperCamelCase = (("num_inference_steps", 50),)
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Tuple = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = dict(self.forward_default_kwargs )
snake_case: Any = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.dummy_sample
snake_case: Optional[Any] = 0.1 * sample
snake_case: Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case: int = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
snake_case: Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
snake_case: str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
snake_case: str = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
snake_case: Any = dummy_past_residuals[:]
snake_case: List[str] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: Dict = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
snake_case: Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: List[str] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = dict(self.forward_default_kwargs )
snake_case: int = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.dummy_sample
snake_case: Optional[int] = 0.1 * sample
snake_case: Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case: List[Any] = self.get_scheduler_config()
snake_case: Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case: Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
snake_case: Tuple = dummy_past_residuals[:]
snake_case: str = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: List[Any] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
snake_case: Tuple = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: Optional[Any] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = self.scheduler_classes[0]
snake_case: List[str] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = 10
snake_case: int = self.dummy_model()
snake_case: List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case: Dict = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
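    # Note on the helper above: PNDM runs in two phases -- the Runge-Kutta warm-up
    # steps exposed as `scheduler.prk_timesteps` (advanced with `step_prk`) followed
    # by the linear multistep phase in `scheduler.plms_timesteps` (advanced with
    # `step_plms`); `full_loop` mirrors exactly that order.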
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = dict(self.forward_default_kwargs )
snake_case: Dict = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
snake_case: int = self.get_scheduler_config()
snake_case: Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ )
snake_case: str = self.dummy_sample
snake_case: List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ):
snake_case: Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case: Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case: int = dummy_past_residuals[:]
snake_case: Any = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: Dict = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case: Any = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = self.scheduler_classes[0]
snake_case: Optional[Any] = self.get_scheduler_config(steps_offset=1 )
snake_case: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = 27
for scheduler_class in self.scheduler_classes:
snake_case: Optional[Any] = self.dummy_sample
snake_case: Optional[int] = 0.1 * sample
snake_case: Dict = self.get_scheduler_config()
snake_case: Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case: List[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
def _UpperCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
snake_case: str = self.scheduler_classes[0]
snake_case: List[Any] = self.get_scheduler_config()
snake_case: Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.full_loop()
snake_case: Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case: Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.full_loop(prediction_type='v_prediction' )
snake_case: List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case: List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
snake_case: Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
snake_case: Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3 | 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "swinv2"
__UpperCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: int = image_size
snake_case: Union[str, Any] = patch_size
snake_case: List[str] = num_channels
snake_case: Tuple = embed_dim
snake_case: str = depths
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = num_heads
snake_case: Optional[int] = window_size
snake_case: Any = mlp_ratio
snake_case: Optional[int] = qkv_bias
snake_case: Union[str, Any] = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: Dict = drop_path_rate
snake_case: List[str] = hidden_act
snake_case: int = use_absolute_embeddings
snake_case: Any = layer_norm_eps
snake_case: Dict = initializer_range
snake_case: List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case: Tuple = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
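        # For example, with the defaults above (embed_dim=96 and four stages) this
        # yields hidden_size = 96 * 2 ** 3 = 768.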
snake_case: Union[str, Any] = (0, 0, 0, 0) | 692 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    '''Return a (getitem, key) operation tuple.'''
    return getitem, k
def _set(k, v):
    '''Return a (setitem, key, value) operation tuple.'''
    return setitem, k, v
def _del(k):
    '''Return a (delitem, key) operation tuple.'''
    return delitem, k
def _run_operation(obj, fun, *args):
    '''Apply `fun` to `obj` and return (result, None), or (None, exception) on failure.'''
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
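    # Differential test: the same operation sequence is applied to HashMap and to a
    # plain dict, and the results, string forms, key sets, lengths and items must match.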
snake_case: List[Any] = HashMap(initial_block_size=4 )
snake_case: List[Any] = {}
for _, (fun, *args) in enumerate(__A ):
snake_case: Optional[int] = _run_operation(__A , __A , *__A )
snake_case: str = _run_operation(__A , __A , *__A )
assert my_res == py_res
assert str(__A ) == str(__A )
assert set(__A ) == set(__A )
assert len(__A ) == len(__A )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
'''simple docstring'''
    def is_public(name: str) -> bool:
        return not name.startswith('_' )
snake_case: Dict = {name for name in dir({} ) if is_public(__A )}
snake_case: List[str] = {name for name in dir(HashMap() ) if is_public(__A )}
assert dict_public_names > hash_public_names
| 719 |
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase = os.path.join(git_repo_path, "src", "transformers")
__UpperCAmelCase = "\n{0} = None\n"
__UpperCAmelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__UpperCAmelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tokenizers' )
snake_case: List[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tensorflow_text' )
snake_case: int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers' )
snake_case: Optional[Any] = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tensorflow_text' )
snake_case: Dict = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers_and_vision' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE__ )
self.assertIn('tensorflow_text' , SCREAMING_SNAKE_CASE__ )
self.assertIn('sentencepiece_and_tokenizers' , SCREAMING_SNAKE_CASE__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '\nCONSTANT = None\n' )
snake_case: Any = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case: Optional[int] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case: Tuple = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case: Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE__ ) | 692 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "mctct"
def __init__( self , SCREAMING_SNAKE_CASE__=80_65 , SCREAMING_SNAKE_CASE__=15_36 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__=61_44 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3_84 , SCREAMING_SNAKE_CASE__=9_20 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=0.3 , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0.3 , SCREAMING_SNAKE_CASE__=0.3 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0.3 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=(7,) , SCREAMING_SNAKE_CASE__=(3,) , SCREAMING_SNAKE_CASE__=80 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="sum" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = vocab_size
snake_case: Any = hidden_size
snake_case: List[str] = num_hidden_layers
snake_case: Union[str, Any] = intermediate_size
snake_case: str = num_attention_heads
snake_case: Optional[Any] = attention_head_dim
snake_case: int = max_position_embeddings
snake_case: int = layer_norm_eps
snake_case: str = layerdrop
snake_case: int = hidden_act
snake_case: List[str] = initializer_range
snake_case: Optional[int] = hidden_dropout_prob
snake_case: Optional[Any] = attention_probs_dropout_prob
snake_case: Any = pad_token_id
snake_case: Any = bos_token_id
snake_case: str = eos_token_id
snake_case: Union[str, Any] = conv_glu_dim
snake_case: Optional[int] = conv_dropout
snake_case: List[str] = num_conv_layers
snake_case: Dict = input_feat_per_channel
snake_case: List[Any] = input_channels
snake_case: str = conv_channels
snake_case: str = ctc_loss_reduction
snake_case: Optional[int] = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case: int = list(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = list(SCREAMING_SNAKE_CASE__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) | 720 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = question_encoder
snake_case: Union[str, Any] = generator
snake_case: Optional[int] = self.question_encoder
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'question_encoder_tokenizer' )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'generator_tokenizer' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE__ )
@classmethod
def _UpperCamelCase ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case: int = kwargs.pop('config' , SCREAMING_SNAKE_CASE__ )
if config is None:
snake_case: str = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
snake_case: Dict = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
def __call__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.current_tokenizer(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.question_encoder
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.generator
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "longest" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , SCREAMING_SNAKE_CASE__ , )
if max_length is None:
snake_case: Optional[Any] = self.current_tokenizer.model_max_length
snake_case: int = self(
SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case: Any = self.current_tokenizer.model_max_length
snake_case: List[str] = self(
text_target=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: Dict = labels['input_ids']
return model_inputs | 692 | 0 |
'''Prefix-sum helper supporting O(1) range-sum queries and subarray-sum lookups.'''
class PrefixSum:
    '''Precomputes prefix sums of an integer array.'''
    def __init__( self , array ):
        '''Build the prefix-sum table for ``array``.'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start , end ):
        '''Return the sum of array[start..end], both indices inclusive.

        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        '''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum ):
        '''Return True if any contiguous subarray sums to ``target_sum``.

        >>> PrefixSum([1, 2, 3]).contains_sum(5)
        True
        '''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case , *snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__A , __A , clobber=__A )
with pytest.warns(__A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__A ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
) | 692 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__UpperCAmelCase = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowerCAmelCase_ ( __A : int , __A : Tuple , __A : int , __A : List[Any]=None ):
'''simple docstring'''
snake_case: List[str] = XLNetConfig.from_json_file(__A )
snake_case: List[str] = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
snake_case: int = finetuning_task
snake_case: List[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
snake_case: Optional[int] = XLNetForSequenceClassification(__A )
elif "squad" in finetuning_task:
snake_case: Dict = finetuning_task
snake_case: Optional[Any] = XLNetForQuestionAnswering(__A )
else:
snake_case: Dict = XLNetLMHeadModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A )
# Save pytorch-model
snake_case: Optional[Any] = os.path.join(__A , __A )
snake_case: List[Any] = os.path.join(__A , __A )
print(f"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(f"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
__UpperCAmelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 700 |
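    # Example invocation (script and file names below are placeholders, not from this repo):
    #   python convert_xlnet_checkpoint.py \
    #     --tf_checkpoint_path ./xlnet_model.ckpt \
    #     --xlnet_config_file ./xlnet_config.json \
    #     --pytorch_dump_folder_path ./xlnet-pytorch \
    #     --finetuning_task sts-b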
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__UpperCamelCase = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the training data."} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
__UpperCamelCase = field(default=snake_case , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
snake_case: str = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
snake_case: Optional[Any] = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case: str = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case: Tuple = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case: List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case: Optional[int] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case: Tuple = data_args.train_file.split('.' )[-1]
snake_case: Union[str, Any] = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case: Union[str, Any] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
snake_case: List[Any] = load_dataset('csv' , data_files=__A , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case: Optional[Any] = load_dataset('json' , data_files=__A , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case: Tuple = raw_datasets['train'].features['label'].names
snake_case: List[str] = len(__A )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case: Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case: List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__A , )
snake_case: Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case: int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case: Union[str, Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case: Optional[Any] = {'Refused': 0, 'Entailed': 1}
snake_case: List[Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case: List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__A : Any ):
# Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
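        # Illustrative example of the flattened `table_text` format parsed above
        # (values are made up): "city#population\nparis#2.1m\nberlin#3.6m" becomes a
        # DataFrame with columns ["city", "population"] and two rows.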
snake_case: str = examples['statement']
snake_case: int = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
snake_case: List[Any] = tokenizer(__A , __A , padding=__A , max_length=__A , truncation=__A )
snake_case: List[Any] = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
snake_case: int = raw_datasets.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case: List[str] = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case: Tuple = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case: Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case: Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
snake_case: str = raw_datasets['test']
if data_args.max_predict_samples is not None:
snake_case: List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case: str = default_data_collator
elif training_args.fpaa:
snake_case: List[str] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
snake_case: List[Any] = None
# Initialize our Trainer
snake_case: List[str] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
snake_case: Optional[int] = None
if training_args.resume_from_checkpoint is not None:
snake_case: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case: Optional[Any] = last_checkpoint
snake_case: Union[str, Any] = trainer.train(resume_from_checkpoint=__A )
snake_case: List[Any] = train_result.metrics
snake_case: List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
snake_case: Optional[Any] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __A )
trainer.save_metrics('train' , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case: Dict = trainer.evaluate(eval_dataset=__A )
snake_case: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
snake_case: Dict = min(__A , len(__A ) )
trainer.log_metrics('eval' , __A )
trainer.save_metrics('eval' , __A )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
snake_case: Optional[int] = predict_dataset.remove_columns('label' )
snake_case: str = trainer.predict(__A , metric_key_prefix='predict' ).predictions
snake_case: Any = np.argmax(__A , axis=1 )
snake_case: int = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__A ):
snake_case: int = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case: Optional[int] = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 692 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionControlNetImgaImgPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
snake_case: str = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
snake_case: Dict = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
snake_case: Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case: List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
snake_case: int = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case: Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
snake_case: Any = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
snake_case: Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = 2
snake_case: Tuple = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE__ , device=torch.device(SCREAMING_SNAKE_CASE__ ) , )
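        # With a scale factor of 2 the conditioning image is 64x64 pixels, matching the
        # 64x64 init image built below and the dummy UNet's 32x32 latent sample size
        # after the VAE's 2x downsampling.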
snake_case: int = floats_tensor(control_image.shape , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case: Any = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((64, 64) )
snake_case: Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
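# The class below exercises the multi-ControlNet variant of the same image-to-image
# pipeline: two dummy ControlNets are wrapped in a `MultiControlNetModel` and the
# pipeline is fed a list of two conditioning images.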
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionControlNetImgaImgPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            # Give the (normally zero-initialized) ControlNet projection convs non-zero
            # weights so the dummy model emits a non-trivial conditioning signal.
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
snake_case: Optional[int] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(0 )
snake_case: List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(0 )
snake_case: List[Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
snake_case: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case: Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
snake_case: Optional[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case: Union[str, Any] = MultiControlNetModel([controlneta, controlneta] )
snake_case: Union[str, Any] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
snake_case: Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
snake_case: Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: int = 2
snake_case: int = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE__ , device=torch.device(SCREAMING_SNAKE_CASE__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE__ , device=torch.device(SCREAMING_SNAKE_CASE__ ) , ),
]
snake_case: int = floats_tensor(control_image[0].shape , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case: Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((64, 64) )
snake_case: int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
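        # Build the two-controlnet pipeline, then run the same prompt with four different
        # control_guidance_start/end settings and check that each guided variant differs from the unguided run.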
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(SCREAMING_SNAKE_CASE__ )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_a = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_b = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_c = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_d = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_dummy_components()
snake_case: str = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
snake_case: List[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE__ , controlnet=SCREAMING_SNAKE_CASE__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case: Union[str, Any] = 'evil space-punk bird'
snake_case: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) )
snake_case: Union[str, Any] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) )
snake_case: int = pipe(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , control_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , num_inference_steps=50 , strength=0.6 , )
snake_case: Optional[int] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
snake_case: int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2 | 701 |
'''simple docstring'''
import math
def is_prime( number : int ):
    """Return True if ``number`` is prime, using 6k +/- 1 trial division.

    >>> is_prime(2)
    True
    >>> is_prime(9)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( ratio : float = 0.1 ):
    """Return the side length of the number spiral at which the ratio of primes
    along both diagonals first falls below ``ratio``."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # Visit the three non-square corners of the next layer (side length j + 2)
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCAmelCase_ ( ):
'''simple docstring'''
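    # patch_submodule must swap out every route to os.path.join inside _test_patching
    # (direct attribute, from-import alias, renamed import) and restore them all on exit.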
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
snake_case: Union[str, Any] = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , __A ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert _test_patching.open is open
snake_case: str = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , __A ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Any = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , __A ):
pass
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: List[Any] = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , __A ) is None
with patch_submodule(_test_patching , 'len' , __A ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: List[str] = '__test_patch_submodule_start_and_stop_mock__'
snake_case: str = patch_submodule(_test_patching , 'open' , __A )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCAmelCase_ ( ):
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
snake_case: Union[str, Any] = '__test_patch_submodule_successive_join__'
snake_case: Union[str, Any] = '__test_patch_submodule_successive_dirname__'
snake_case: Optional[Any] = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , __A ):
with patch_submodule(_test_patching , 'os.rename' , __A ):
with patch_submodule(_test_patching , 'os.path.dirname' , __A ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , __A ):
with patch_submodule(_test_patching , 'os.path.join' , __A ):
with patch_submodule(_test_patching , 'os.path.dirname' , __A ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Optional[Any] = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , __A ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , __A ):
pass
| 702 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
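        # Build a text/ids pair that survives a round trip: collect ids whose decoded form is plain
        # ASCII, keep only those that re-encode to the same single id, clamp to the min/max length,
        # then decode (optionally with a leading space) and re-encode the final string.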
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 692 | 0 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ):
'''simple docstring'''
snake_case: List[str] = parent
snake_case: Any = batch_size
snake_case: str = seq_length
snake_case: Tuple = is_training
snake_case: Optional[int] = use_input_mask
snake_case: List[str] = use_token_type_ids
snake_case: Optional[int] = use_labels
snake_case: List[Any] = vocab_size
snake_case: Union[str, Any] = hidden_size
snake_case: int = num_hidden_layers
snake_case: List[str] = num_attention_heads
snake_case: Any = intermediate_size
snake_case: Tuple = hidden_act
snake_case: Tuple = hidden_dropout_prob
snake_case: Optional[int] = attention_probs_dropout_prob
snake_case: List[Any] = max_position_embeddings
snake_case: Union[str, Any] = type_vocab_size
snake_case: Any = type_sequence_label_size
snake_case: Dict = initializer_range
snake_case: Any = num_labels
snake_case: str = num_choices
snake_case: List[str] = scope
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: Tuple = None
if self.use_input_mask:
snake_case: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case: Dict = None
if self.use_token_type_ids:
snake_case: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case: Any = None
snake_case: Any = None
snake_case: List[str] = None
if self.use_labels:
snake_case: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case: Any = ids_tensor([self.batch_size] , self.num_choices )
snake_case: Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self ):
'''simple docstring'''
        config = self.get_config()
        config.vocab_size = 3_00
return config
def _UpperCamelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = MraModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: str = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
snake_case: int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: List[str] = True
snake_case: Optional[Any] = MraModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: str = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , encoder_attention_mask=SCREAMING_SNAKE_CASE__ , )
snake_case: Optional[int] = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , )
snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = MraForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = MraForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Any = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = self.num_labels
snake_case: Tuple = MraForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = self.num_labels
snake_case: List[Any] = MraForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Any = self.num_choices
snake_case: Tuple = MraForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
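        # Tile the single-sequence inputs across the choice dimension expected by the multiple-choice head.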
snake_case: Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case: Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case: Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case: str = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = ()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = MraModelTester(self )
snake_case: List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case: int = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: Tuple = MraModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='MRA does not output attentions' )
def _UpperCamelCase ( self ):
'''simple docstring'''
return
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
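        # Run the released uw-madison/mra-base-512-4 checkpoint on a dummy 0..255 input and compare
        # the first 3x3 block of the hidden states against precomputed reference values.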
snake_case: Dict = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
snake_case: Dict = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
snake_case: Any = model(SCREAMING_SNAKE_CASE__ )[0]
snake_case: Dict = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
snake_case: Optional[Any] = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
snake_case: int = model(SCREAMING_SNAKE_CASE__ )[0]
snake_case: Dict = 5_02_65
snake_case: int = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
snake_case: int = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
snake_case: Dict = model(SCREAMING_SNAKE_CASE__ )[0]
snake_case: Union[str, Any] = 5_02_65
snake_case: Optional[Any] = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
snake_case: str = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) | 703 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = only_cross_attention
snake_case: Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
snake_case: Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case: List[str] = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case: str = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case: Tuple = (
AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none
else:
snake_case: int = None
snake_case: Tuple = None
# 3. Feed-forward
snake_case: Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ )
# let chunk size default to None
snake_case: Any = None
snake_case: Any = 0
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = chunk_size
snake_case: str = dim
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
snake_case: Optional[int] = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norma(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype )
else:
snake_case: List[str] = self.norma(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case: List[str] = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.use_ada_layer_norm_zero:
snake_case: Tuple = gate_msa.unsqueeze(1 ) * attn_output
snake_case: List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case: Dict = (
self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: List[str] = attn_output + hidden_states
# 3. Feed-forward
snake_case: str = self.norma(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
snake_case: List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case: Optional[Any] = torch.cat(
[self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case: int = self.ff(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case: Tuple = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: int = int(dim * mult )
snake_case: Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case: int = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if activation_fn == "gelu-approximate":
snake_case: Optional[Any] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate='tanh' )
elif activation_fn == "geglu":
snake_case: List[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif activation_fn == "geglu-approximate":
snake_case: Optional[int] = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.ModuleList([] )
# project in
self.net.append(SCREAMING_SNAKE_CASE__ )
# project dropout
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
# project out
self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for module in self.net:
snake_case: Optional[int] = module(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ):
'''simple docstring'''
super().__init__()
snake_case: Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = approximate
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.proj(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.gelu(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
        hidden_states , gate = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = self.proj(SCREAMING_SNAKE_CASE__ )
return x * torch.sigmoid(1.7_02 * x )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Optional[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = nn.SiLU()
snake_case: Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 )
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) )
        scale , shift = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 )
snake_case: str = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.SiLU()
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
        emb = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
snake_case: Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ):
'''simple docstring'''
super().__init__()
snake_case: str = num_groups
snake_case: str = eps
if act_fn is None:
snake_case: Dict = None
else:
snake_case: List[str] = get_activation(SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.act:
snake_case: Optional[Any] = self.act(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.linear(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
snake_case: Any = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps )
snake_case: Optional[int] = x * (1 + scale) + shift
return x | 692 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__UpperCAmelCase = logging.get_logger(__name__)
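# Each mapping below pairs a model-type key from CONFIG_MAPPING_NAMES with the name of the
# corresponding Flax class; they are wrapped into _LazyAutoMapping instances for the Auto model classes.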
__UpperCAmelCase = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__UpperCAmelCase = OrderedDict(
[
        # Model for Image classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__UpperCAmelCase = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__UpperCAmelCase = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__UpperCAmelCase = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__UpperCAmelCase = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__UpperCAmelCase = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModel)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__UpperCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__UpperCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__UpperCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 704 |
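A brief usage sketch of the auto classes defined from these mappings (assumes flax is installed; the checkpoint name is an illustrative choice):

from transformers import FlaxAutoModel

# Dispatches to FlaxBertModel via the "bert" entry of the base model mapping above.
model = FlaxAutoModel.from_pretrained("bert-base-cased")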
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = RoCBertTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = filter_non_english
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
snake_case: List[Any] = {}
snake_case: List[str] = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = i
snake_case: Union[str, Any] = i
snake_case: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: Dict = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
snake_case: Union[str, Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: str = i
snake_case: Optional[int] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
snake_case: int = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _UpperCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case: List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
snake_case: Optional[int] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
snake_case: int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ['的', '人', '有']
snake_case: Any = ''.join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = True
snake_case: List[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = False
snake_case: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: int = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case: Union[str, Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: int = tokenizer.encode('你好' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Any = tokenizer.encode('你是谁' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Dict = '你好,你是谁'
snake_case: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) | 692 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=None , ):
'''simple docstring'''
snake_case: Dict = parent
snake_case: List[Any] = batch_size
snake_case: Dict = image_size
snake_case: Optional[int] = patch_size
snake_case: Optional[Any] = num_channels
snake_case: Tuple = is_training
snake_case: Any = use_labels
snake_case: List[str] = hidden_size
snake_case: Tuple = num_hidden_layers
snake_case: List[str] = num_attention_heads
snake_case: Any = intermediate_size
snake_case: int = hidden_act
snake_case: Optional[Any] = hidden_dropout_prob
snake_case: List[Any] = attention_probs_dropout_prob
snake_case: Dict = type_sequence_label_size
snake_case: Union[str, Any] = initializer_range
snake_case: str = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case: Union[str, Any] = (image_size // patch_size) ** 2
snake_case: Tuple = num_patches + 1
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: Optional[int] = None
if self.use_labels:
snake_case: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case: List[Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = ViTMSNModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Union[str, Any] = self.type_sequence_label_size
snake_case: Tuple = ViTMSNForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Dict = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case: Tuple = 1
snake_case: Any = ViTMSNForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case: List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.prepare_config_and_inputs()
snake_case: str = config_and_inputs
snake_case: Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = ViTMSNModelTester(self )
snake_case: Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case: List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: Optional[Any] = [*signature.parameters.keys()]
snake_case: List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: Optional[Any] = ViTMSNModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(2 )
snake_case: Optional[int] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self.default_image_processor
snake_case: Dict = prepare_img()
snake_case: Dict = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
snake_case: Dict = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
snake_case: Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
snake_case: str = torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) | 705 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__UpperCAmelCase = 6378137.0
__UpperCAmelCase = 6356752.314245
__UpperCAmelCase = 6_378_137
def lowerCAmelCase_ ( __A : float , __A : float , __A : float , __A : float ):
'''simple docstring'''
snake_case: Optional[Any] = (AXIS_A - AXIS_B) / AXIS_A
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: Tuple = radians(__A )
snake_case: Tuple = radians(__A )
# Equation
snake_case: List[Any] = sin((phi_a - phi_a) / 2 )
snake_case: Dict = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
snake_case: Union[str, Any] = sqrt(sin_sq_phi + (cos(__A ) * cos(__A ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__A )
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
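A self-contained spot check of the haversine formula used above, ignoring the flattening correction (the chosen coordinates are an assumption; both printed values should come out near 111 km):

from math import asin, cos, pi, radians, sin, sqrt

R = 6_378_137  # equatorial radius, as above
lat1, lon1, lat2, lon2 = 0.0, 0.0, 1.0, 0.0  # two points one degree of latitude apart
p1, p2 = radians(lat1), radians(lat2)
h = sin((p2 - p1) / 2) ** 2 + cos(p1) * cos(p2) * sin(radians(lon2 - lon1) / 2) ** 2
print(2 * R * asin(sqrt(h)), pi / 180 * R)  # both approximately 111_319 m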
'''simple docstring'''
from __future__ import annotations
from math import pi
def lowerCAmelCase_ ( __A : float , __A : float , __A : float ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 706 |
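Illustrative spot checks of the X_L = 2 * pi * f * L relation implemented above (the component values are assumptions; results are rounded):

from math import pi

print(2 * pi * 10e3 * 50e-3)      # reactance of a 50 mH inductor at 10 kHz, about 3141.6 ohm
print(3141.6 / (2 * pi * 10e3))   # recovering the inductance from that reactance, about 0.05 H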
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 692 | 0 |
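A hypothetical downstream import enabled by the lazy module structure above (requires torch per the guarded branch; the checkpoint name is an assumption):

from transformers import RoFormerForMaskedLM, RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")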
'''simple docstring'''
def lowerCAmelCase_ ( __A : int , __A : int ):
'''simple docstring'''
snake_case: Optional[Any] = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case: Optional[int] = n - k
# Calculate C(n,k)
for i in range(__A ):
result *= n - i
result //= i + 1
return result
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __A ) // (node_count + 1)
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if n < 0:
raise ValueError('factorial() not defined for negative values' )
snake_case: Tuple = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
return catalan_number(__A ) * factorial(__A )
if __name__ == "__main__":
__UpperCAmelCase = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
) | 707 |
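Worked check of the identities above for n = 5 nodes: C(2n, n) = C(10, 5) = 252, so the Catalan number is 252 / (n + 1) = 42 binary search trees, and multiplying by 5! = 120 gives 5040 distinct binary trees, matching the wording of the final print statement.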
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Tuple = model.config
snake_case: str = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
snake_case: Optional[Any] = MBartConfig(
is_decoder=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__A , add_final_layer_norm=__A , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "encoder.model" in name:
snake_case: Optional[Any] = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
snake_case: str = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
snake_case: Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
snake_case: Tuple = 'encoder.' + name
if "attn.proj" in name:
snake_case: Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
snake_case: Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case: Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
snake_case: Dict = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
snake_case: int = 'encoder.layernorm.bias'
return name
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case: List[Any] = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case: Union[str, Any] = key.split('.' )
snake_case: Optional[Any] = int(key_split[3] )
snake_case: Any = int(key_split[5] )
snake_case: Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case: Union[str, Any] = val[:dim, :]
snake_case: Any = val[dim : dim * 2, :]
snake_case: List[str] = val[-dim:, :]
else:
snake_case: str = val[:dim]
snake_case: Union[str, Any] = val[dim : dim * 2]
snake_case: List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
snake_case: Optional[int] = val
return orig_state_dict
def lowerCAmelCase_ ( __A : List[Any] , __A : Any=None , __A : List[str]=False ):
'''simple docstring'''
snake_case: str = DonutModel.from_pretrained(__A ).eval()
# load HuggingFace model
snake_case , snake_case: Optional[Any] = get_configs(__A )
snake_case: Optional[int] = DonutSwinModel(__A )
snake_case: Tuple = MBartForCausalLM(__A )
snake_case: Optional[Any] = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
snake_case: Optional[int] = original_model.state_dict()
snake_case: Optional[int] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# verify results on scanned document
snake_case: Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
snake_case: str = dataset['test'][0]['image'].convert('RGB' )
snake_case: Optional[int] = XLMRobertaTokenizerFast.from_pretrained(__A , from_slow=__A )
snake_case: Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
snake_case: Dict = DonutProcessor(__A , __A )
snake_case: Optional[Any] = processor(__A , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
snake_case: int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
snake_case: Optional[Any] = 'When is the coffee break?'
snake_case: Optional[int] = task_prompt.replace('{user_input}' , __A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
snake_case: Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
snake_case: str = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        snake_case: str = '<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
snake_case: int = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
snake_case: Optional[Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
snake_case: Optional[int] = original_model.decoder.tokenizer(__A , add_special_tokens=__A , return_tensors='pt' )[
'input_ids'
]
snake_case: Any = original_model.encoder.model.patch_embed(__A )
snake_case , snake_case: Dict = model.encoder.embeddings(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
# verify encoder hidden states
snake_case: Tuple = original_model.encoder(__A )
snake_case: List[str] = model.encoder(__A ).last_hidden_state
assert torch.allclose(__A , __A , atol=1E-2 )
# verify decoder hidden states
snake_case: List[Any] = original_model(__A , __A , __A ).logits
snake_case: List[Any] = model(__A , decoder_input_ids=__A ).logits
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__UpperCAmelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 692 | 0 |
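An illustrative invocation of the conversion script above, assuming it is saved as convert_donut_to_pytorch.py and that the output directory is arbitrary: python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base-finetuned-docvqa --pytorch_dump_folder_path ./donut-docvqa-hf --push_to_hub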
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = tempfile.mkdtemp()
snake_case: List[str] = 5
# Realm tok
snake_case: Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
snake_case: Optional[int] = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case: Any = os.path.join(SCREAMING_SNAKE_CASE__ , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case: List[str] = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = RealmConfig(num_block_records=self.num_block_records )
return config
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=SCREAMING_SNAKE_CASE__ , )
return block_records
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_config()
snake_case: List[str] = self.get_dummy_retriever()
snake_case: Dict = retriever.tokenizer
snake_case: Optional[int] = np.array([0, 3] , dtype='long' )
snake_case: Dict = tokenizer(['Test question'] ).input_ids
snake_case: Tuple = tokenizer(
['the fourth'] , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ).input_ids
snake_case: Optional[int] = config.reader_seq_len
snake_case: List[str] = retriever(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , answer_ids=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.get_config()
snake_case: Dict = self.get_dummy_retriever()
snake_case: str = retriever.tokenizer
snake_case: List[Any] = np.array([0, 3, 5] , dtype='long' )
snake_case: List[Any] = tokenizer(['Test question'] ).input_ids
snake_case: List[str] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ).input_ids
snake_case: Any = config.reader_seq_len
snake_case: Any = retriever(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , answer_ids=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
snake_case: Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
snake_case: Dict = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
snake_case: int = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' ) | 708 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
snake_case: Union[str, Any] = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = np.random.randn(3 , 4 )
snake_case: Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case: Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Dict = np.random.randn(3 , 4 , 5 )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Optional[int] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
snake_case: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: List[str] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
snake_case: List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: int = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = np.random.randn(1 , 3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case: List[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Tuple = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Any = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = np.random.randn(3 , 4 )
snake_case: int = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) )
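# A minimal sketch (an assumption, not the library's actual implementation) of how a
# framework-agnostic helper like the `squeeze` exercised above can dispatch on the array
# type: numpy arrays are handled directly, torch and jax arrays expose a compatible
# `squeeze` method, and a TensorFlow branch would call tf.squeeze instead (omitted here).
import numpy as np
def squeeze_any(array, axis=None):
    if isinstance(array, np.ndarray):
        return np.squeeze(array, axis=axis)
    return array.squeeze() if axis is None else array.squeeze(axis)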
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "PoolFormerConfig"
# Base docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = [1, 512, 7, 7]
# Image classification docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = "tabby, tabby cat"
__UpperCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( __A : Tuple , __A : float = 0.0 , __A : bool = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
snake_case: Union[str, Any] = 1 - drop_prob
snake_case: List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case: List[Any] = keep_prob + torch.rand(__A , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case: Any = input.div(__A ) * random_tensor
return output
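# Worked illustration of the rescaling above (comment-only): with drop_prob = 0.2 each
# sample in the batch is zeroed with probability 0.2 and the survivors are divided by
# keep_prob = 0.8, so the expected value of the output equals the input
# (0.8 * x / 0.8 + 0.2 * 0 = x).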
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = drop_prob
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training )
def _UpperCamelCase ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: List[str] = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride)
snake_case: Union[str, Any] = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding)
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.projection(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.norm(SCREAMING_SNAKE_CASE__ )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: str = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = ACTaFN[config.hidden_act]
else:
snake_case: int = config.hidden_act
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.act_fn(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.drop(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.drop(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = PoolFormerPooling(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
# Useful for training neural nets
snake_case: Union[str, Any] = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity()
snake_case: Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
snake_case: Any = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.use_layer_scale:
snake_case: str = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: str = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = ()
snake_case: Dict = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: Any = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = (output,) + outputs
return outputs
else:
snake_case: Optional[Any] = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) )
# First residual connection
snake_case: Union[str, Any] = pooling_output + hidden_states
snake_case: List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: Dict = hidden_states + layer_output
snake_case: Optional[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = config
# stochastic depth decay rule
snake_case: List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: List[Any] = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
# Transformer blocks
snake_case: str = []
snake_case: int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
snake_case: str = () if output_hidden_states else None
snake_case: Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: Dict = layers
# Get patch embeddings from hidden_states
snake_case: int = embedding_layer(SCREAMING_SNAKE_CASE__ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = blk(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = layer_outputs[0]
if output_hidden_states:
snake_case: List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = "poolformer"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = config
snake_case: Tuple = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: List[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Any = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.dense(SCREAMING_SNAKE_CASE__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = config.num_labels
snake_case: str = PoolFormerModel(SCREAMING_SNAKE_CASE__ )
# Final norm
snake_case: int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case: Optional[Any] = self.poolformer(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: Any = outputs[0]
snake_case: str = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) )
snake_case: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict = 'single_label_classification'
else:
snake_case: List[str] = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Union[str, Any] = MSELoss()
if self.num_labels == 1:
snake_case: List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: int = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
snake_case: Union[str, Any] = CrossEntropyLoss()
snake_case: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int = BCEWithLogitsLoss()
snake_case: Optional[int] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
snake_case: str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
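# A short usage sketch for the classification model defined above. The checkpoint name comes
# from the docstring constants in this file ("sail/poolformer_s12"); the rest is ordinary
# transformers usage and is only an assumption about how these classes are typically called.
import torch
from transformers import AutoImageProcessor, PoolFormerForImageClassification
def classify_image(image):
    processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
    model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]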
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "tokenizer"]
__UpperCamelCase = "BlipImageProcessor"
__UpperCamelCase = "AutoTokenizer"
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.image_processor
def __call__( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
snake_case: List[Any] = self.tokenizer
snake_case: List[str] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
return text_encoding
# add pixel_values
snake_case: int = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
if text is not None:
snake_case: Tuple = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
else:
snake_case: Any = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__ )
return encoding_image_processor
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.tokenizer.model_input_names
snake_case: Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase_ ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case: Any = cst_fwd.get(__A , np.inf )
snake_case: int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case: Union[str, Any] = new_cost_f
snake_case: Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case: List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[Any] = -1
snake_case: Any = set()
snake_case: str = set()
snake_case: int = {source: 0}
snake_case: Dict = {destination: 0}
snake_case: int = {source: None}
snake_case: Union[str, Any] = {destination: None}
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case , snake_case: List[str] = queue_forward.get()
visited_forward.add(__A )
snake_case , snake_case: int = queue_backward.get()
visited_backward.add(__A )
snake_case: str = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
snake_case: Optional[Any] = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case: Any = shortest_distance
return shortest_path_distance
__UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
__UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
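# Independent brute-force check of the example graphs above (not the bidirectional search
# itself): the two E -> F routes cost 4 (E-B-C-D-F) and 3 (E-G-F), so the expected shortest
# distance is 3.
def _all_path_costs(graph, src, dst, seen=()):
    if src == dst:
        return [0]
    costs = []
    for nxt, weight in graph.get(src, []):
        if nxt not in seen:
            costs.extend(weight + c for c in _all_path_costs(graph, nxt, dst, (*seen, src)))
    return costs
_forward_graph = {
    "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]],
}
assert min(_all_path_costs(_forward_graph, "E", "F")) == 3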
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def lowerCAmelCase_ ( __A : int , __A : int , __A : int ):
'''simple docstring'''
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
snake_case: Tuple = b * b - 4 * a * c
snake_case: Dict = (-b + sqrt(__A )) / (2 * a)
snake_case: int = (-b - sqrt(__A )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: int = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
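# Worked check of the example in main(): 5x^2 + 6x + 1 has discriminant 36 - 20 = 16, so the
# roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0; both satisfy the original equation.
assert abs(5 * (-0.2) ** 2 + 6 * (-0.2) + 1) < 1e-9
assert abs(5 * (-1.0) ** 2 + 6 * (-1.0) + 1) < 1e-9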
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
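# Hedged usage sketch for the tokenizer above, using the checkpoint named in this file.
# Per build_inputs_with_special_tokens, a single sequence is prefixed with the </s>
# separator (fairseq id 2); the remaining ids depend on the sentencepiece model itself.
def _xglm_encode(text):
    from transformers import XGLMTokenizer
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    return tokenizer(text).input_ids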
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowerCAmelCase_ ( __A : str , __A : Dict , __A : int , __A : Dict=5 ):
'''simple docstring'''
assert masked_input.count('<mask>' ) == 1
snake_case: List[Any] = torch.tensor(tokenizer.encode(__A , add_special_tokens=__A ) ).unsqueeze(0 ) # Batch size 1
snake_case: List[Any] = model(__A )[0] # The last hidden-state is the first element of the output tuple
snake_case: Optional[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
snake_case: str = logits[0, masked_index, :]
snake_case: int = logits.softmax(dim=0 )
snake_case: int = prob.topk(k=__A , dim=0 )
snake_case: int = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__A ) )] )
snake_case: List[Any] = tokenizer.mask_token
snake_case: Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
snake_case: Optional[int] = predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(__A ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(__A ) , __A ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__A , __A ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
__UpperCAmelCase = CamembertTokenizer.from_pretrained("camembert-base")
__UpperCAmelCase = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
__UpperCAmelCase = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
return getitem, k
def lowerCAmelCase_ ( __A : Any , __A : Optional[int] ):
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
return delitem, k
def lowerCAmelCase_ ( __A : str , __A : int , *__A : Tuple ):
'''simple docstring'''
try:
return fun(__A , *__A ), None
except Exception as e:
return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: List[Any] = HashMap(initial_block_size=4 )
snake_case: List[Any] = {}
for _, (fun, *args) in enumerate(__A ):
snake_case , snake_case: Optional[int] = _run_operation(__A , __A , *__A )
snake_case , snake_case: str = _run_operation(__A , __A , *__A )
assert my_res == py_res
assert str(__A ) == str(__A )
assert set(__A ) == set(__A )
assert len(__A ) == len(__A )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
'''simple docstring'''
def is_public(__A : str ) -> bool:
return not name.startswith('_' )
snake_case: Dict = {name for name in dir({} ) if is_public(__A )}
snake_case: List[str] = {name for name in dir(HashMap() ) if is_public(__A )}
assert dict_public_names > hash_public_names
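# A minimal direct-usage sketch of the HashMap exercised above, restricted to the operations
# the parametrized test relies on (setitem, getitem, delitem, len, items); it assumes the
# same import path as the test module.
def _hash_map_demo():
    from data_structures.hashing.hash_map import HashMap
    table = HashMap(initial_block_size=4)
    table["key_a"] = "val_a"
    table["key_b"] = "val_b"
    assert table["key_a"] == "val_a"
    assert len(table) == 2
    del table["key_b"]
    assert set(table.items()) == {("key_a", "val_a")}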
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __A : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ):
'''simple docstring'''
for attribute in key.split('.' ):
snake_case: List[str] = getattr(__A , __A )
if weight_type is not None:
snake_case: Optional[int] = getattr(__A , __A ).shape
else:
snake_case: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case: Optional[int] = value
elif weight_type == "weight_g":
snake_case: List[str] = value
elif weight_type == "weight_v":
snake_case: Dict = value
elif weight_type == "bias":
snake_case: Optional[Any] = value
else:
snake_case: int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A : List[Any] , __A : List[str] ):
'''simple docstring'''
snake_case: List[Any] = []
snake_case: List[Any] = fairseq_model.state_dict()
snake_case: Union[str, Any] = hf_model.feature_extractor
# if the encoder has a different dim than the decoder -> use proj_weight
snake_case: Dict = None
for name, value in fairseq_dict.items():
snake_case: Tuple = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
snake_case: List[Any] = True
elif name.split('.' )[0] == "proj":
snake_case: List[Any] = fairseq_model.proj
snake_case: int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case: int = True
if "*" in mapped_key:
snake_case: List[str] = name.split(__A )[0].split('.' )[-2]
snake_case: Dict = mapped_key.replace('*' , __A )
if "weight_g" in name:
snake_case: Tuple = 'weight_g'
elif "weight_v" in name:
snake_case: int = 'weight_v'
elif "bias" in name:
snake_case: Tuple = 'bias'
elif "weight" in name:
snake_case: List[Any] = 'weight'
else:
snake_case: Any = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( __A : List[str] , __A : List[Any] , __A : int , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: int = full_name.split('conv_layers.' )[-1]
snake_case: Tuple = name.split('.' )
snake_case: Any = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case: Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case: int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case: Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case: str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case , snake_case: List[Any] = emb.weight.shape
snake_case: Optional[int] = nn.Linear(__A , __A , bias=__A )
snake_case: Any = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
with open(__A , 'r' , encoding='utf-8' ) as f:
snake_case: List[Any] = f.readlines()
snake_case: Any = [line.split(' ' )[0] for line in lines]
snake_case: int = len(__A )
snake_case: Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Any , __A : List[Any] , __A : int , __A : str , ):
'''simple docstring'''
snake_case: Union[str, Any] = WavaVecaConfig.from_pretrained(__A )
snake_case: str = SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
snake_case: List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
snake_case , snake_case , snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case: List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case: Optional[Any] = WavaVecaModel(__A )
snake_case: Any = recursively_load_weights_wavaveca(model.encoder , __A )
snake_case: Union[str, Any] = SpeechaTextaForCausalLM(__A )
snake_case , snake_case: Optional[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case: str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case: int = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
snake_case: List[Any] = False
# add projection layer
snake_case: Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case: Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case: List[Any] = create_vocab_dict(__A )
with open(os.path.join(__A , 'vocab.json' ) , 'w' ) as fp:
json.dump(__A , __A )
snake_case: Union[str, Any] = SpeechaTextaTokenizer(os.path.join(__A , 'vocab.json' ) )
tokenizer.save_pretrained(__A )
snake_case: Tuple = hf_wavavec.config.to_dict()
snake_case: int = tokenizer.pad_token_id
snake_case: Dict = tokenizer.bos_token_id
snake_case: Optional[int] = tokenizer.eos_token_id
snake_case: Dict = 'speech_to_text_2'
snake_case: Optional[Any] = 'wav2vec2'
snake_case: Tuple = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
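# Example invocation of the conversion script above. The file name and local paths are
# placeholders; the flag names and the two default config checkpoints are the ones defined
# in the argparse block.
#
#   python convert_wav2vec2_to_speech2text2.py \
#       --checkpoint_path /path/to/wav2vec2_seq2seq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./converted_model \
#       --encoder_config_path facebook/wav2vec2-large-lv60 \
#       --decoder_config_path facebook/s2t-small-mustc-en-fr-st \
#       --vocab_size 10224 \
#       --num_decoder_layers 7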
'''simple docstring'''
def lowerCAmelCase_ ( __A : int , __A : int , __A : list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square(__A : int , __A : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case: List[Any] = update_area_of_max_square(__A , col + 1 )
snake_case: Dict = update_area_of_max_square(row + 1 , col + 1 )
snake_case: List[Any] = update_area_of_max_square(row + 1 , __A )
if mat[row][col]:
snake_case: Optional[int] = 1 + min([right, diagonal, down] )
snake_case: int = max(largest_square_area[0] , __A )
return sub_problem_sol
else:
return 0
snake_case: Tuple = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def lowerCAmelCase_ ( __A : int , __A : int , __A : list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
__A : int , __A : int , __A : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case: Optional[Any] = update_area_of_max_square_using_dp_array(__A , col + 1 , __A )
snake_case: Dict = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __A )
snake_case: List[Any] = update_area_of_max_square_using_dp_array(row + 1 , __A , __A )
if mat[row][col]:
snake_case: str = 1 + min([right, diagonal, down] )
snake_case: Any = max(largest_square_area[0] , __A )
snake_case: int = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case: Tuple = [0]
snake_case: Union[str, Any] = [[-1] * cols for _ in range(__A )]
update_area_of_max_square_using_dp_array(0 , 0 , __A )
return largest_square_area[0]
def lowerCAmelCase_ ( __A : int , __A : int , __A : list[list[int]] ):
'''simple docstring'''
snake_case: str = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case: Dict = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case: Optional[int] = dp_array[row][col + 1]
snake_case: Optional[int] = dp_array[row + 1][col + 1]
snake_case: int = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case: Optional[Any] = 1 + min(__A , __A , __A )
snake_case: List[Any] = max(dp_array[row][col] , __A )
else:
snake_case: Optional[Any] = 0
return largest_square_area
def lowerCAmelCase_ ( __A : int , __A : int , __A : list[list[int]] ):
'''simple docstring'''
snake_case: str = [0] * (cols + 1)
snake_case: Union[str, Any] = [0] * (cols + 1)
snake_case: Union[str, Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case: Any = current_row[col + 1]
snake_case: str = next_row[col + 1]
snake_case: List[str] = next_row[col]
if mat[row][col] == 1:
snake_case: Dict = 1 + min(__A , __A , __A )
snake_case: Dict = max(current_row[col] , __A )
else:
snake_case: str = 0
snake_case: List[str] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
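# Worked example beyond the 2x2 call above: for [[1, 1, 0], [1, 1, 1], [0, 1, 1]] with
# rows=3 and cols=3, the largest all-ones square has side 2 (for instance the top-left
# 2x2 block), so each variant (plain recursion, memoized recursion, full DP table,
# two-row DP) tracks a maximal side length of 2 and is expected to return 2.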
'''simple docstring'''
def lowerCAmelCase_ ( __A : int = 1_00 ):
'''simple docstring'''
snake_case: List[str] = n * (n + 1) * (2 * n + 1) / 6
snake_case: List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
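# Worked check of the closed-form expressions above: for n = 10 the square of the sum is
# 55 ** 2 = 3025 and the sum of the squares is 385, so the difference is 2640; the default
# n = 100 gives 25164150.
assert (10 * 11 // 2) ** 2 - 10 * 11 * 21 // 6 == 2640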
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = tempfile.mkdtemp()
snake_case: str = SamImageProcessor()
snake_case: Tuple = SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
snake_case: Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_image_processor()
snake_case: Any = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.prepare_image_inputs()
snake_case: Optional[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Optional[int] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: Optional[Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: int = [torch.ones((1, 3, 5, 5) )]
snake_case: str = [[17_64, 26_46]]
snake_case: str = [[6_83, 10_24]]
snake_case: Union[str, Any] = processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case: Optional[Any] = processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
snake_case: Tuple = [np.ones((1, 3, 5, 5) )]
snake_case: Optional[int] = processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case: str = [[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
snake_case: str = processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = tempfile.mkdtemp()
snake_case: Optional[Any] = SamImageProcessor()
snake_case: Tuple = SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: Union[str, Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: List[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
snake_case: Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_image_processor()
snake_case: int = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.prepare_image_inputs()
snake_case: Tuple = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Optional[Any] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_image_processor()
snake_case: str = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = [tf.ones((1, 3, 5, 5) )]
snake_case: int = [[17_64, 26_46]]
snake_case: Optional[int] = [[6_83, 10_24]]
snake_case: Tuple = processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case: Optional[Any] = processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
snake_case: Dict = [np.ones((1, 3, 5, 5) )]
snake_case: Dict = processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case: Dict = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case: List[Any] = processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: List[str] = SamImageProcessor()
snake_case: Tuple = SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: List[str] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: str = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
snake_case: Tuple = [tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = [torch.tensor(SCREAMING_SNAKE_CASE__ )]
snake_case: Tuple = [[17_64, 26_46]]
snake_case: Dict = [[6_83, 10_24]]
snake_case: List[Any] = processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
snake_case: Optional[int] = processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.get_image_processor()
snake_case: Tuple = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.prepare_image_inputs()
snake_case: Optional[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
snake_case: Optional[int] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
snake_case: Dict = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
snake_case: Optional[int] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) | 715 |
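# Illustrative sketch (not from the tests above) of what mask post-processing amounts to:
# low-resolution mask logits are resized back to each image's original (height, width).
import numpy as np
import tensorflow as tf

low_res_masks = np.ones((1, 3, 5, 5), dtype=np.float32)   # (batch, num_masks, h, w)
original_size = (1764, 2646)
channels_last = np.moveaxis(low_res_masks[0], 0, -1)       # (h, w, num_masks) for tf.image.resize
resized = tf.image.resize(channels_last, original_size).numpy()
resized = np.moveaxis(resized, -1, 0)[None]                # back to (1, num_masks, H, W)
print(resized.shape)                                       # (1, 3, 1764, 2646)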
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__UpperCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__UpperCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCAmelCase_ ( __A : Dict , __A : List[Any] ):
'''simple docstring'''
for tf_name, hf_name in patterns:
snake_case: List[Any] = k.replace(__A , __A )
return k
def lowerCAmelCase_ ( __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[int] = BigBirdPegasusConfig(**__A )
snake_case: List[Any] = BigBirdPegasusForConditionalGeneration(__A )
snake_case: Any = torch_model.state_dict()
snake_case: Any = {}
# separating decoder weights
snake_case: Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
snake_case: Any = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
snake_case: List[str] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Any = DECODER_PATTERNS
snake_case: int = rename_state_dict_key(__A , __A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: Optional[Any] = v.T
snake_case: Any = torch.from_numpy(__A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
snake_case: List[Any] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Union[str, Any] = REMAINING_PATTERNS
snake_case: str = rename_state_dict_key(__A , __A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: int = v.T
snake_case: Any = torch.from_numpy(__A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case: str = mapping['model.embed_positions.weight']
snake_case: Any = mapping.pop('model.embed_positions.weight' )
snake_case , snake_case: Union[str, Any] = torch_model.load_state_dict(__A , strict=__A )
snake_case: Optional[int] = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
snake_case: Tuple = tf.train.list_variables(__A )
snake_case: str = {}
snake_case: List[str] = ['global_step']
for name, shape in tqdm(__A , desc='converting tf checkpoint to dict' ):
snake_case: str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case: Any = tf.train.load_variable(__A , __A )
snake_case: Optional[int] = array
return tf_weights
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict ):
'''simple docstring'''
snake_case: int = get_tf_weights_as_numpy(__A )
snake_case: int = convert_bigbird_pegasus(__A , __A )
torch_model.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 692 | 0 |
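# Illustrative sketch (not the script above): checkpoint conversion largely boils down to
# renaming keys with ordered string substitutions (and transposing dense kernels, since TF
# stores Dense weights as (in, out) while PyTorch Linear expects (out, in)).
def rename_key(key: str, patterns: list[tuple[str, str]]) -> str:
    for old, new in patterns:
        key = key.replace(old, new)
    return key

example_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
assert rename_key("encoder/layer_0/attention/kernel", example_patterns) == "encoder.layers.0.attention.weight"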
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = FlaxAutoencoderKL
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = 4
snake_case: Optional[int] = 3
snake_case: List[Any] = (32, 32)
snake_case: List[Any] = jax.random.PRNGKey(0 )
snake_case: Dict = jax.random.uniform(SCREAMING_SNAKE_CASE__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
snake_case: Any = self.dummy_input
return init_dict, inputs_dict | 716 |
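# Illustrative sketch of the JAX convention the dummy inputs above rely on: randomness is
# driven by an explicit PRNG key, so reusing the same key always yields the same sample.
import jax

key = jax.random.PRNGKey(0)
sample = jax.random.uniform(key, (4, 3, 32, 32))       # (batch, channels, height, width)
same_sample = jax.random.uniform(key, (4, 3, 32, 32))  # identical, because the key is reused
assert (sample == same_sample).all()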
'''simple docstring'''
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
snake_case: str = [0] * len(__A )
snake_case: Tuple = []
snake_case: Tuple = [1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
snake_case: int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case: Any = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph) | 692 | 0 |
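# A minimal, readable sketch of the same idea (illustrative only, not the snippet above):
# longest path length in a DAG via Kahn's topological order, relaxing each edge once.
from collections import deque

def dag_longest_path_length(adjacency: dict[int, list[int]]) -> int:
    indegree = {node: 0 for node in adjacency}
    for neighbours in adjacency.values():
        for node in neighbours:
            indegree[node] += 1
    distance = {node: 1 for node in adjacency}  # number of vertices on the best path so far
    queue = deque(node for node, degree in indegree.items() if degree == 0)
    while queue:
        vertex = queue.popleft()
        for neighbour in adjacency[vertex]:
            distance[neighbour] = max(distance[neighbour], distance[vertex] + 1)
            indegree[neighbour] -= 1
            if indegree[neighbour] == 0:
                queue.append(neighbour)
    return max(distance.values())

# The same graph as above gives 5 (e.g. the path 0 -> 3 -> 5 -> 6 -> 7).
assert dag_longest_path_length({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5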
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCAmelCase = "bart"
__UpperCAmelCase = True
@st.cache(allow_output_mutation=__A )
def lowerCAmelCase_ ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
snake_case: Any = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
snake_case: Any = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
snake_case: List[str] = qar_model.eval()
else:
snake_case: Dict = (None, None)
if MODEL_TYPE == "bart":
snake_case: List[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
snake_case: Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
snake_case: str = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
snake_case: Dict = sas_model.eval()
else:
snake_case: str = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__A )
def lowerCAmelCase_ ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
snake_case: Tuple = faiss.StandardGpuResources()
snake_case: List[Any] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
snake_case: int = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , )
snake_case: List[str] = faiss.IndexFlatIP(1_28 )
snake_case: List[Any] = faiss.index_cpu_to_gpu(__A , 1 , __A )
wikiaab_gpu_index_flat.add(__A ) # TODO fix for larger GPU
else:
snake_case: Optional[Any] = (None, None)
snake_case: List[Any] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__A )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: str = datasets.load_dataset('eli5' , name='LFQA_reddit' )
snake_case: Optional[int] = elia['train_eli5']
snake_case: Optional[Any] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) )
snake_case: List[str] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__A )
return (elia_train, eli5_train_q_index)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = load_indexes()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = load_models()
__UpperCAmelCase , __UpperCAmelCase = load_train_data()
def lowerCAmelCase_ ( __A : Union[str, Any] , __A : List[Any]=10 ):
'''simple docstring'''
snake_case: List[Any] = embed_questions_for_retrieval([question] , __A , __A )
snake_case: Optional[int] = eli5_train_q_index.search(__A , __A )
snake_case: Optional[int] = [elia_train[int(__A )] for i in I[0]]
return nn_examples
def lowerCAmelCase_ ( __A : List[str] , __A : Dict="wiki40b" , __A : Optional[Any]="dense" , __A : Union[str, Any]=10 ):
'''simple docstring'''
if source == "none":
snake_case: List[str] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case: Optional[Any] = query_qa_dense_index(
__A , __A , __A , __A , __A , __A )
else:
snake_case: Any = query_es_index(
__A , __A , index_name='english_wiki40b_snippets_100w' , n_results=__A , )
snake_case: Union[str, Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
snake_case: Dict = 'question: {} context: {}'.format(__A , __A )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __A : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __A : None),
} )
def lowerCAmelCase_ ( __A : Tuple , __A : Optional[int] , __A : str , __A : List[str]=64 , __A : Any=2_56 , __A : Dict=False , __A : Optional[Any]=2 , __A : List[str]=0.95 , __A : Any=0.8 ):
'''simple docstring'''
with torch.no_grad():
snake_case: Optional[Any] = qa_sas_generate(
__A , __A , __A , num_answers=1 , num_beams=__A , min_len=__A , max_len=__A , do_sample=__A , temp=__A , top_p=__A , top_k=__A , max_input_length=10_24 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCAmelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCAmelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCAmelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCAmelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCAmelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCAmelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCAmelCase = action_list.index(action_st)
__UpperCAmelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCAmelCase = show_type == "Show full text of passages"
else:
__UpperCAmelCase = 3
__UpperCAmelCase = True
__UpperCAmelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCAmelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCAmelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCAmelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCAmelCase = "wiki40b"
__UpperCAmelCase = "dense"
__UpperCAmelCase = "beam"
__UpperCAmelCase = 2
__UpperCAmelCase = 64
__UpperCAmelCase = 256
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCAmelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCAmelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCAmelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCAmelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCAmelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCAmelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCAmelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCAmelCase = None
# start main text
__UpperCAmelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCAmelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCAmelCase = st.text_input("Enter your question here:", "")
else:
__UpperCAmelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCAmelCase , __UpperCAmelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCAmelCase , __UpperCAmelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCAmelCase = support_list[:10]
__UpperCAmelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCAmelCase , __UpperCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCAmelCase , __UpperCAmelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCAmelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCAmelCase = res[1].strip()
if sec_titles == "":
__UpperCAmelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCAmelCase = sec_titles.split(" & ")
__UpperCAmelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCAmelCase = find_nearest_training(question)
__UpperCAmelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCAmelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCAmelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 717 |
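# Illustrative sketch of the dense-retrieval step the app above relies on (random vectors
# stand in for real embeddings): build an inner-product FAISS index over passage vectors
# and look up the top-k nearest passages for an embedded question.
import faiss
import numpy as np

dim = 128
passage_vectors = np.random.rand(1000, dim).astype("float32")
index = faiss.IndexFlatIP(dim)
index.add(passage_vectors)

question_vector = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(question_vector, 10)  # top-10 passages by inner product
print(ids[0])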
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = tempfile.mkdtemp()
snake_case: Optional[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case: Optional[int] = {
'do_resize': True,
'size': {'height': 2_24, 'width': 2_24},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: Union[str, Any] = self.get_rust_tokenizer()
snake_case: Union[str, Any] = self.get_image_processor()
snake_case: List[str] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case: List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
snake_case: Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_image_processor()
snake_case: Tuple = self.get_tokenizer()
snake_case: Optional[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.prepare_image_inputs()
snake_case: List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: Optional[int] = self.get_tokenizer()
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Tuple = self.prepare_image_inputs()
snake_case: Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case: int = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = 'Alexandra,T-shirt的价格是15便士。'
snake_case: List[Any] = self.prepare_image_inputs()
snake_case: Dict = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 692 | 0 |
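# Simplified, illustrative sketch (hypothetical class) of what a multimodal processor does:
# run the tokenizer on text, the image processor on images, and merge the two dicts,
# raising if neither input is given -- which is what the tests above assert.
class TinyProcessorSketch:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding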
'''simple docstring'''
def lowerCAmelCase_ ( __A : Any=2_81_23 ):
'''simple docstring'''
snake_case: Optional[int] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
snake_case: Dict = set()
snake_case: Tuple = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(__A )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution()) | 718 |
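# Illustrative sketch (not the snippet above): the proper-divisor-sum sieve behind Project
# Euler 23. Every divisor pair (i, k) with i <= sqrt(n) contributes i and n // i, so one
# pass over i up to sqrt(limit) fills the whole table.
def proper_divisor_sums(limit: int) -> list[int]:
    sums = [1] * (limit + 1)  # 1 divides every n > 1; entries 0 and 1 are unused
    for i in range(2, int(limit**0.5) + 1):
        sums[i * i] += i  # perfect squares count the square root only once
        for k in range(i + 1, limit // i + 1):
            sums[k * i] += k + i
    return sums

# Quick check: 12 has proper divisors 1 + 2 + 3 + 4 + 6 = 16, so 12 is abundant.
assert proper_divisor_sums(12)[12] == 16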
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "swinv2"
__UpperCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: int = image_size
snake_case: Union[str, Any] = patch_size
snake_case: List[str] = num_channels
snake_case: Tuple = embed_dim
snake_case: str = depths
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = num_heads
snake_case: Optional[int] = window_size
snake_case: Any = mlp_ratio
snake_case: Optional[int] = qkv_bias
snake_case: Union[str, Any] = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: Dict = drop_path_rate
snake_case: List[str] = hidden_act
snake_case: int = use_absolute_embeddings
snake_case: Any = layer_norm_eps
snake_case: Dict = initializer_range
snake_case: List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case: Tuple = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
snake_case: Union[str, Any] = (0, 0, 0, 0) | 692 | 0 |
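# Illustrative arithmetic only (assuming the defaults above): with embed_dim = 96 and
# depths = [2, 2, 6, 2], the channel dimension doubles at each of the three patch-merging
# stages, so the hidden size exposed to downstream models is 96 * 2**3 = 768.
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768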
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if not is_accelerate_available():
return method
snake_case: Optional[Any] = version.parse(accelerate.__version__ ).base_version
if version.parse(__A ) < version.parse('0.17.0' ):
return method
def wrapper(self : Any , *__A : List[str] , **__A : Union[str, Any] ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *__A , **__A )
return wrapper
| 719 |
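# Minimal sketch of the version-gated wrapper pattern above (hypothetical decorator name):
# wrap a method only when the installed dependency is new enough, otherwise return it as-is.
from functools import wraps
from packaging import version

def gate_on_version(installed_version: str, minimum: str = "0.17.0"):
    def decorator(method):
        if version.parse(installed_version) < version.parse(minimum):
            return method  # dependency too old: keep the original method untouched

        @wraps(method)
        def wrapper(self, *args, **kwargs):
            # a pre-forward hook (e.g. moving weights to the right device) would run here
            return method(self, *args, **kwargs)

        return wrapper

    return decorator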
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase = os.path.join(git_repo_path, "src", "transformers")
__UpperCAmelCase = "\n{0} = None\n"
__UpperCAmelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__UpperCAmelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tokenizers' )
snake_case: List[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tensorflow_text' )
snake_case: int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers' )
snake_case: Optional[Any] = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tensorflow_text' )
snake_case: Dict = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers_and_vision' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE__ )
self.assertIn('tensorflow_text' , SCREAMING_SNAKE_CASE__ )
self.assertIn('sentencepiece_and_tokenizers' , SCREAMING_SNAKE_CASE__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '\nCONSTANT = None\n' )
snake_case: Any = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case: Optional[int] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case: Tuple = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case: Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE__ ) | 692 | 0 |
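# Illustrative sketch of the backend-detection rule the assertions above exercise: pull
# every is_xxx_available() out of an "if not (...):" guard line and join multiple backends.
import re
from typing import Optional

_backend_re = re.compile(r"is_([a-z_]*)_available\(\)")

def find_backend_sketch(line: str) -> Optional[str]:
    if not line.strip().startswith("if not"):
        return None
    backends = _backend_re.findall(line)
    return "_and_".join(backends) if backends else None

assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert find_backend_sketch("    if not (is_sentencepiece_available() and is_tokenizers_available()):") == "sentencepiece_and_tokenizers"
assert find_backend_sketch('    _import_structure["models.albert"].append("AlbertTokenizerFast")') is None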
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__A , __A , clobber=__A )
with pytest.warns(__A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__A ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
) | 720 |
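# Illustrative sketch of the "::" protocol chaining the tests above exercise (core fsspec
# behaviour): a member file inside an archive is addressed with one chained URL, and
# get_fs_token_paths resolves the chain down to the inner filesystem.
import zipfile
import fsspec

with zipfile.ZipFile("archive.zip", "w") as zf:
    zf.writestr("dataset.jsonl", '{"text": "hello"}\n')

with fsspec.open("zip://dataset.jsonl::archive.zip", "rt", encoding="utf-8") as f:
    print(f.read())

fs, _, _ = fsspec.get_fs_token_paths("zip://dataset.jsonl::archive.zip")
assert fs.isfile("dataset.jsonl")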
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = question_encoder
snake_case: Union[str, Any] = generator
snake_case: Optional[int] = self.question_encoder
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'question_encoder_tokenizer' )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'generator_tokenizer' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE__ )
@classmethod
def _UpperCamelCase ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case: int = kwargs.pop('config' , SCREAMING_SNAKE_CASE__ )
if config is None:
snake_case: str = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
snake_case: Dict = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
def __call__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.current_tokenizer(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.question_encoder
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.generator
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "longest" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , SCREAMING_SNAKE_CASE__ , )
if max_length is None:
snake_case: Optional[Any] = self.current_tokenizer.model_max_length
snake_case: int = self(
SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case: Any = self.current_tokenizer.model_max_length
snake_case: List[str] = self(
text_target=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: Dict = labels['input_ids']
return model_inputs | 692 | 0 |
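# Illustrative sketch (simplified, hypothetical names) of the delegation pattern the class
# above implements: one wrapper holds two tokenizers and forwards calls to whichever is
# currently active, switching between question-encoder and generator modes.
class CompositeTokenizerSketch:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def use_generator(self):
        self.current_tokenizer = self.generator

    def use_question_encoder(self):
        self.current_tokenizer = self.question_encoder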
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Tuple = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=SCREAMING_SNAKE_CASE__ , help='Name of the model to download' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Union[str, Any] = model
snake_case: Dict = cache
snake_case: Any = force
snake_case: Optional[Any] = trust_remote_code
def _UpperCamelCase ( self ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) | 721 |
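# Minimal sketch of the sub-command pattern used above (hypothetical command and handler
# names): each command registers its own sub-parser and binds a handler through
# set_defaults(func=...), so the caller only needs args.func(args).
import argparse

def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser("cli")
    subcommands = parser.add_subparsers()
    download = subcommands.add_parser("download")
    download.add_argument("model", type=str)
    download.add_argument("--force", action="store_true")
    download.set_defaults(func=lambda args: print(f"downloading {args.model} (force={args.force})"))
    return parser

args = build_parser().parse_args(["download", "bert-base-uncased", "--force"])
args.func(args)  # dispatches to the handler chosen by the sub-parser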
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case , *snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__A , __A , clobber=__A )
with pytest.warns(__A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__A ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
) | 692 | 0 |
from __future__ import annotations
__UpperCAmelCase = "#"
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case: dict = {}
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = self._trie
for char in text:
if char not in trie:
snake_case: Optional[int] = {}
snake_case: Any = trie[char]
snake_case: Optional[Any] = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = self._trie
for char in prefix:
if char in trie:
snake_case: int = trie[char]
else:
return []
return self._elements(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = []
for c, v in d.items():
snake_case: Union[str, Any] = [' '] if c == END else [(c + s) for s in self._elements(SCREAMING_SNAKE_CASE__ )]
result.extend(SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase = Trie()
__UpperCAmelCase = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: List[str] = trie.find_word(__A )
return tuple(string + word for word in suffixes )
def lowerCAmelCase_ ( ):
'''simple docstring'''
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 700 |
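# Readable reference sketch of the same autocomplete idea (illustrative only): a nested
# dict trie where a sentinel key marks word endings, plus prefix enumeration.
END = "#"

def insert_word(trie: dict, word: str) -> None:
    node = trie
    for char in word:
        node = node.setdefault(char, {})
    node[END] = True

def words_with_prefix(trie: dict, prefix: str) -> list[str]:
    node = trie
    for char in prefix:
        if char not in node:
            return []
        node = node[char]

    def walk(node: dict, acc: str) -> list[str]:
        found = []
        for key, child in node.items():
            if key == END:
                found.append(acc)
            else:
                found.extend(walk(child, acc + key))
        return found

    return walk(node, prefix)

trie: dict = {}
for w in ("depart", "detergent", "daring", "dog", "deer", "deal"):
    insert_word(trie, w)
assert sorted(words_with_prefix(trie, "de")) == ["deal", "deer", "depart", "detergent"]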
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__UpperCamelCase = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the training data."} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
__UpperCamelCase = field(default=snake_case , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
snake_case: str = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
snake_case: Optional[Any] = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case: str = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case: Tuple = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f""", distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case: List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case: Optional[int] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case: Tuple = data_args.train_file.split('.' )[-1]
snake_case: Union[str, Any] = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case: Union[str, Any] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
snake_case: List[Any] = load_dataset('csv' , data_files=__A , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case: Optional[Any] = load_dataset('json' , data_files=__A , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case: Tuple = raw_datasets['train'].features['label'].names
snake_case: List[str] = len(__A )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case: Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case: List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__A , )
snake_case: Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case: int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case: Union[str, Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case: Optional[Any] = {'Refused': 0, 'Entailed': 1}
snake_case: List[Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
            f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the """
            f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case: List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__A : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(__A : Dict ):
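            # The raw `table_text` is expected to use '#' as the column separator and '\n' as the
            # row separator, with the first row acting as the header, e.g. (illustrative values only)
            # "city#population\nparis#2100000".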
snake_case: str = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
snake_case: List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
snake_case: str = examples['statement']
snake_case: int = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
snake_case: List[Any] = tokenizer(__A , __A , padding=__A , max_length=__A , truncation=__A )
snake_case: List[Any] = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
snake_case: int = raw_datasets.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case: List[str] = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case: Tuple = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case: Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case: Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
snake_case: str = raw_datasets['test']
if data_args.max_predict_samples is not None:
snake_case: List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__A : EvalPrediction ):
snake_case: int = p.predictions[0] if isinstance(p.predictions , __A ) else p.predictions
snake_case: List[str] = np.argmax(__A , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case: str = default_data_collator
elif training_args.fpaa:
snake_case: List[str] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
snake_case: List[Any] = None
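    # Rationale: with `pad_to_max_length` padding already happened at tokenization time, so the default
    # collator suffices; under fp16, `pad_to_multiple_of=8` keeps padded lengths aligned to multiples of 8,
    # which tends to be friendlier to mixed-precision kernels.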
# Initialize our Trainer
snake_case: List[str] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
snake_case: Optional[int] = None
if training_args.resume_from_checkpoint is not None:
snake_case: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case: Optional[Any] = last_checkpoint
snake_case: Union[str, Any] = trainer.train(resume_from_checkpoint=__A )
snake_case: List[Any] = train_result.metrics
snake_case: List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
snake_case: Optional[Any] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __A )
trainer.save_metrics('train' , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case: Dict = trainer.evaluate(eval_dataset=__A )
snake_case: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
snake_case: Dict = min(__A , len(__A ) )
trainer.log_metrics('eval' , __A )
trainer.save_metrics('eval' , __A )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
snake_case: Optional[int] = predict_dataset.remove_columns('label' )
snake_case: str = trainer.predict(__A , metric_key_prefix='predict' ).predictions
snake_case: Any = np.argmax(__A , axis=1 )
snake_case: int = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__A ):
snake_case: int = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case: Optional[int] = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 692 | 0 |
'''simple docstring'''
import os
def lowerCAmelCase_ ( __A : str = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(__A ) , __A ) ) as input_file:
snake_case: str = [
[int(__A ) for element in line.split(',' )]
for line in input_file.readlines()
]
snake_case: Optional[Any] = len(__A )
snake_case: Optional[Any] = len(matrix[0] )
snake_case: List[str] = [[-1 for _ in range(__A )] for _ in range(__A )]
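    # Column 0 can only be entered from the left edge, so it is seeded directly from the matrix.
    # Every later column is then relaxed in three passes (step in from the left, sweep downward
    # moves, sweep upward moves), which covers all paths that move up, down and right but never left.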
for i in range(__A ):
snake_case: Dict = matrix[i][0]
for j in range(1 , __A ):
for i in range(__A ):
snake_case: Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __A ):
snake_case: Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
snake_case: int = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }') | 701 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( __A : float = 0.1 ):
'''simple docstring'''
snake_case: Optional[int] = 3
snake_case: int = 3
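    # Count primes on the diagonals of a number spiral (Project Euler 58): each new ring grows the side
    # length by 2 and adds three prime candidates at its corners; the fourth corner, (j + 2) ** 2, is a
    # perfect square and is excluded by the range below since it can never be prime. 2 * j - 1 is the
    # total count of numbers lying on both diagonals of a square with side length j.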
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__A )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
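        # ByT5 ids are raw UTF-8 byte values offset by the 3 special tokens (pad/eos/unk),
        # e.g. 'U' (byte 85) -> 88 and '€' -> its three UTF-8 bytes 226/130/172 -> 229/133/175;
        # the trailing 1 is the </s> token appended by the tokenizer.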
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 692 | 0 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( __A : float = 0.1 ):
'''simple docstring'''
snake_case: Optional[int] = 3
snake_case: int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__A )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = only_cross_attention
snake_case: Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
snake_case: Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case: List[str] = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case: str = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
snake_case: Tuple = (
AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none
else:
snake_case: int = None
snake_case: Tuple = None
# 3. Feed-forward
snake_case: Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ )
# let chunk size default to None
snake_case: Any = None
snake_case: Any = 0
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = chunk_size
snake_case: str = dim
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
snake_case: Optional[int] = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case , snake_case , snake_case , snake_case , snake_case: int = self.norma(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype )
else:
snake_case: List[str] = self.norma(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case: List[str] = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.use_ada_layer_norm_zero:
snake_case: Tuple = gate_msa.unsqueeze(1 ) * attn_output
snake_case: List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case: Dict = (
self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: List[str] = attn_output + hidden_states
# 3. Feed-forward
snake_case: str = self.norma(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
snake_case: List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case: Optional[Any] = torch.cat(
[self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case: int = self.ff(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case: Tuple = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: int = int(dim * mult )
snake_case: Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case: int = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if activation_fn == "gelu-approximate":
snake_case: Optional[Any] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate='tanh' )
elif activation_fn == "geglu":
snake_case: List[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif activation_fn == "geglu-approximate":
snake_case: Optional[int] = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.ModuleList([] )
# project in
self.net.append(SCREAMING_SNAKE_CASE__ )
# project dropout
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
# project out
self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for module in self.net:
snake_case: Optional[int] = module(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ):
'''simple docstring'''
super().__init__()
snake_case: Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = approximate
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.proj(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.gelu(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 )
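        # GEGLU: the input is projected to twice the output width, split into (hidden, gate),
        # and the block returns hidden * GELU(gate), i.e. a gated variant of the GELU feed-forward.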
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case , snake_case: int = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = self.proj(SCREAMING_SNAKE_CASE__ )
return x * torch.sigmoid(1.7_02 * x )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Optional[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = nn.SiLU()
snake_case: Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 )
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
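        # Adaptive LayerNorm: the discrete timestep index is embedded, passed through SiLU + Linear,
        # and split into a per-channel scale and shift that modulate a LayerNorm without learned
        # affine parameters (FiLM-style conditioning).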
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case: Dict = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 )
snake_case: str = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.SiLU()
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
snake_case: int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case: str = emb.chunk(6 , dim=1 )
snake_case: Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ):
'''simple docstring'''
super().__init__()
snake_case: str = num_groups
snake_case: str = eps
if act_fn is None:
snake_case: Dict = None
else:
snake_case: List[str] = get_activation(SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.act:
snake_case: Optional[Any] = self.act(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.linear(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = emb[:, :, None, None]
snake_case , snake_case: List[Any] = emb.chunk(2 , dim=1 )
snake_case: Any = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps )
snake_case: Optional[int] = x * (1 + scale) + shift
return x | 692 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__UpperCAmelCase = False
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=32 ):
'''simple docstring'''
set_seed(0 )
snake_case: Tuple = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , in_channels=3 , out_channels=3 )
snake_case: Tuple = torch.optim.SGD(model.parameters() , lr=0.00_01 )
return model, optimizer
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
snake_case: Tuple = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='linear' , clip_sample=SCREAMING_SNAKE_CASE__ , )
snake_case: str = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='linear' , clip_sample=SCREAMING_SNAKE_CASE__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
snake_case: str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(SCREAMING_SNAKE_CASE__ ) for _ in range(4 )]
snake_case: Union[str, Any] = [torch.randn((4, 3, 32, 32) ).to(SCREAMING_SNAKE_CASE__ ) for _ in range(4 )]
snake_case: Dict = [torch.randint(0 , 10_00 , (4,) ).long().to(SCREAMING_SNAKE_CASE__ ) for _ in range(4 )]
# train with a DDPM scheduler
snake_case: Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(SCREAMING_SNAKE_CASE__ )
for i in range(4 ):
optimizer.zero_grad()
snake_case: Any = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ , timesteps[i] ).sample
snake_case: List[str] = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
snake_case: Dict = self.get_model_optimizer(resolution=32 )
model.train().to(SCREAMING_SNAKE_CASE__ )
for i in range(4 ):
optimizer.zero_grad()
snake_case: Optional[int] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
snake_case: Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , timesteps[i] ).sample
snake_case: Tuple = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
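        # DDPM and DDIM share the same forward (noising) process, so with identical seeds, data and
        # timesteps the final noisy samples and noise predictions are expected to match within tolerance.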
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) )
| 704 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = RoCBertTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = filter_non_english
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
snake_case: List[Any] = {}
snake_case: List[str] = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = i
snake_case: Union[str, Any] = i
snake_case: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: Dict = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
snake_case: Union[str, Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: str = i
snake_case: Optional[int] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
snake_case: int = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _UpperCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case: List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
snake_case: Optional[int] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
snake_case: int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ['的', '人', '有']
snake_case: Any = ''.join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = True
snake_case: List[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = False
snake_case: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: int = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case: Union[str, Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: int = tokenizer.encode('你好' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Any = tokenizer.encode('你是谁' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Dict = '你好,你是谁'
snake_case: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=10_00 , SCREAMING_SNAKE_CASE__=[3, 3, 6, 4] , SCREAMING_SNAKE_CASE__=[48, 56, 1_12, 2_20] , ):
'''simple docstring'''
snake_case: Optional[Any] = parent
snake_case: List[str] = batch_size
snake_case: Tuple = num_channels
snake_case: int = is_training
snake_case: Union[str, Any] = use_labels
snake_case: Any = hidden_dropout_prob
snake_case: List[Any] = attention_probs_dropout_prob
snake_case: Optional[Any] = num_labels
snake_case: Optional[Any] = image_size
snake_case: List[Any] = layer_depths
snake_case: Optional[int] = embed_dims
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: List[str] = None
if self.use_labels:
snake_case: List[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case: Dict = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=SCREAMING_SNAKE_CASE__ , layer_scale_init_value=1E-5 , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = SwiftFormerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = self.num_labels
snake_case: int = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Optional[int] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
snake_case: Any = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
(snake_case): Union[str, Any] = self.prepare_config_and_inputs()
snake_case: Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = SwiftFormerModelTester(self )
snake_case: Optional[int] = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: Optional[int] = [*signature.parameters.keys()]
snake_case: Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: Any = SwiftFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: Any = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
snake_case: Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: int = outputs.hidden_states
snake_case: str = 8
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: Any = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
def _config_zero_init(SCREAMING_SNAKE_CASE__ ):
snake_case: Union[str, Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1E-10 )
if isinstance(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = _config_zero_init(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return configs_no_init
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: List[Any] = _config_zero_init(SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
snake_case: Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.default_image_processor
snake_case: List[str] = prepare_img()
snake_case: Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
snake_case: Any = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
snake_case: Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(SCREAMING_SNAKE_CASE__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
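# Standalone inference sketch (illustration only; the checkpoint name comes from the
# integration test above, everything else (variable names, the example image path) is an assumption):
#
#   from PIL import Image
#   from transformers import ViTImageProcessor, SwiftFormerForImageClassification
#
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   image = Image.open("some_image.png").convert("RGB")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()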
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__UpperCAmelCase = 6378137.0
__UpperCAmelCase = 6356752.314245
__UpperCAmelCase = 6_378_137
def lowerCAmelCase_ ( lat_1 : float , lon_1 : float , lat_2 : float , lon_2 : float ):
    '''simple docstring'''
    # Haversine distance on latitudes reduced for the flattening of the WGS84 ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat_1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat_2)))
    lambda_1 = radians(lon_1)
    lambda_2 = radians(lon_2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
    doctest.testmod()
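# Usage sketch (added for illustration; the coordinates below are assumptions, not from the original file).
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    # Distance is returned in metres; convert to kilometres for display.
    __distance_m = lowerCAmelCase_(*SAN_FRANCISCO, *YOSEMITE)
    print(F"""San Francisco -> Yosemite: {__distance_m / 1000:.1f} km""")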
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 2, 1] , SCREAMING_SNAKE_CASE__=[2, 2, 4] , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=2.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE__=[1, 2, 3] , ):
'''simple docstring'''
snake_case: Optional[Any] = parent
snake_case: Tuple = batch_size
snake_case: Dict = image_size
snake_case: Union[str, Any] = patch_size
snake_case: Union[str, Any] = num_channels
snake_case: Any = embed_dim
snake_case: List[Any] = depths
snake_case: Any = num_heads
snake_case: Tuple = window_size
snake_case: str = mlp_ratio
snake_case: Optional[Any] = qkv_bias
snake_case: List[str] = hidden_dropout_prob
snake_case: Dict = attention_probs_dropout_prob
snake_case: str = drop_path_rate
snake_case: Tuple = hidden_act
snake_case: str = use_absolute_embeddings
snake_case: str = patch_norm
snake_case: Optional[Any] = layer_norm_eps
snake_case: Dict = initializer_range
snake_case: Any = is_training
snake_case: List[Any] = scope
snake_case: str = use_labels
snake_case: Optional[Any] = type_sequence_label_size
snake_case: Tuple = encoder_stride
snake_case: Optional[Any] = out_features
snake_case: int = out_indices
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: Any = None
if self.use_labels:
snake_case: Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case: List[Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Any = model(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case: Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: List[str] = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(SCREAMING_SNAKE_CASE__ ):
snake_case: List[str] = ['stem']
snake_case: Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.prepare_config_and_inputs()
snake_case: Dict = config_and_inputs
snake_case: List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = MaskFormerSwinModelTester(self )
snake_case: Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Any = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: int = [*signature.parameters.keys()]
snake_case: Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
snake_case: Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = outputs.hidden_states
snake_case: Optional[int] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# Swin has a different seq_length
snake_case: List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case: List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case: str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Optional[Any] = 3
snake_case: List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case: int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case: Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case: Any = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: int = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = 0
return t
def check_equivalence(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__={} ):
with torch.no_grad():
snake_case: Tuple = model(**SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = model(**SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE__ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE__ ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(SCREAMING_SNAKE_CASE__ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE__ )}. Dict has"""
F""" `nan`: {torch.isnan(SCREAMING_SNAKE_CASE__ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE__ )}."""
) , )
recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
snake_case: int = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Any = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
check_equivalence(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
check_equivalence(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
check_equivalence(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {'output_hidden_states': True} )
snake_case: Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
snake_case: str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
check_equivalence(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {'output_hidden_states': True} )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , snake_case ):
'''simple docstring'''
__UpperCamelCase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__UpperCamelCase = MaskFormerSwinConfig
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = MaskFormerSwinModelTester(self )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Tuple = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
snake_case: Tuple = backbone_class(SCREAMING_SNAKE_CASE__ )
backbone.to(SCREAMING_SNAKE_CASE__ )
backbone.eval()
snake_case: str = backbone(**SCREAMING_SNAKE_CASE__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case: int = backbone(**SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case: Any = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case: List[str] = backbone(**SCREAMING_SNAKE_CASE__ , output_attentions=SCREAMING_SNAKE_CASE__ )
                self.assertIsNotNone(outputs.attentions )
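# Backbone usage sketch (illustration only; the configuration values are assumptions taken from
# the model tester above, not a definitive recipe):
#
#   from transformers import MaskFormerSwinConfig, MaskFormerSwinBackbone
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config)
#   outputs = backbone(pixel_values)          # pixel_values: (batch, 3, H, W) float tensor
#   feature_maps = outputs.feature_maps       # one feature map per requested stage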
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
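# Illustration of the lazy-import pattern set up above (the example import is an assumption,
# not part of the original file): because the module object is replaced by `_LazyModule`,
# heavy framework code is only imported when an attribute is first accessed, e.g.
#
#   from transformers.models.roformer import RoFormerConfig   # resolves lazily on first access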
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case: Dict = 0
snake_case: Dict = 0
snake_case: Dict = {}
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if vertex not in self.adjacency:
snake_case: List[Any] = {}
self.num_vertices += 1
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
self.add_vertex(SCREAMING_SNAKE_CASE__ )
self.add_vertex(SCREAMING_SNAKE_CASE__ )
if head == tail:
return
snake_case: Dict = weight
snake_case: Optional[int] = weight
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.get_edges()
for edge in edges:
snake_case: str = edge
edges.remove((tail, head, weight) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
snake_case: List[Any] = list(edges[i] )
edges.sort(key=lambda SCREAMING_SNAKE_CASE__ : e[2] )
for i in range(len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
snake_case: Optional[int] = edges[i][2] + 1
for edge in edges:
snake_case: int = edge
snake_case: Optional[int] = weight
snake_case: Optional[Any] = weight
def __str__( self ):
'''simple docstring'''
snake_case: Dict = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
snake_case: List[Any] = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
snake_case: Dict = Graph()
if vertices is None:
snake_case: int = []
if edges is None:
snake_case: Tuple = []
for vertex in vertices:
g.add_vertex(SCREAMING_SNAKE_CASE__ )
for edge in edges:
g.add_edge(*SCREAMING_SNAKE_CASE__ )
return g
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case: Optional[int] = {}
snake_case: Optional[Any] = {}
def __len__( self ):
'''simple docstring'''
return len(self.parent )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if item in self.parent:
return self.find(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = item
snake_case: Tuple = 0
return item
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(SCREAMING_SNAKE_CASE__ )
if item != self.parent[item]:
snake_case: int = self.find(self.parent[item] )
return self.parent[item]
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = self.find(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.find(SCREAMING_SNAKE_CASE__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
snake_case: List[Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
snake_case: Optional[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
snake_case: List[Any] = roota
return roota
return None
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = graph.num_vertices
snake_case: List[str] = Graph.UnionFind()
snake_case: Tuple = []
while num_components > 1:
snake_case: Union[str, Any] = {}
for vertex in graph.get_vertices():
snake_case: Dict = -1
snake_case: List[Any] = graph.get_edges()
for edge in edges:
snake_case: Dict = edge
edges.remove((tail, head, weight) )
for edge in edges:
snake_case: Dict = edge
snake_case: Optional[int] = union_find.find(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = union_find.find(SCREAMING_SNAKE_CASE__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case: List[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case: Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
snake_case: Any = cheap_edge[vertex]
if union_find.find(SCREAMING_SNAKE_CASE__ ) != union_find.find(SCREAMING_SNAKE_CASE__ ):
union_find.union(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
mst_edges.append(cheap_edge[vertex] )
snake_case: int = num_components - 1
snake_case: List[Any] = Graph.build(edges=SCREAMING_SNAKE_CASE__ )
        return mst
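# Self-contained sketch of the Boruvka MST pass implemented by the static method above
# (added for illustration; the helper name and the sample graph are assumptions and are
# independent of the placeholder-named methods in the class). Assumes a connected graph
# with distinct edge weights.
def _boruvka_mst_sketch(vertices, edges):
    # `edges` is a list of (tail, head, weight) triples.
    parent = {v: v for v in vertices}

    def find(v):
        # Find the component root with path halving.
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v

    mst = []
    num_components = len(vertices)
    while num_components > 1:
        cheapest = {}
        # Pick the cheapest outgoing edge for every component.
        for tail, head, weight in edges:
            root_a, root_b = find(tail), find(head)
            if root_a != root_b:
                for root in (root_a, root_b):
                    if root not in cheapest or weight < cheapest[root][2]:
                        cheapest[root] = (tail, head, weight)
        # Contract the selected edges.
        for tail, head, weight in set(cheapest.values()):
            root_a, root_b = find(tail), find(head)
            if root_a != root_b:
                parent[root_a] = root_b
                mst.append((tail, head, weight))
                num_components -= 1
    return mst


if __name__ == "__main__":
    _sample_edges = [("a", "b", 1), ("b", "c", 2), ("a", "c", 3), ("c", "d", 4)]
    print(_boruvka_mst_sketch(["a", "b", "c", "d"], _sample_edges))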
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Tuple = model.config
snake_case: str = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
snake_case: Optional[Any] = MBartConfig(
is_decoder=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__A , add_final_layer_norm=__A , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "encoder.model" in name:
snake_case: Optional[Any] = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
snake_case: str = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
snake_case: Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
snake_case: Tuple = 'encoder.' + name
if "attn.proj" in name:
snake_case: Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
snake_case: Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case: Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
snake_case: Dict = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
snake_case: int = 'encoder.layernorm.bias'
return name
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case: List[Any] = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case: Union[str, Any] = key.split('.' )
snake_case: Optional[Any] = int(key_split[3] )
snake_case: Any = int(key_split[5] )
snake_case: Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case: Union[str, Any] = val[:dim, :]
snake_case: Any = val[dim : dim * 2, :]
snake_case: List[str] = val[-dim:, :]
else:
snake_case: str = val[:dim]
snake_case: Union[str, Any] = val[dim : dim * 2]
snake_case: List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
snake_case: Optional[int] = val
return orig_state_dict
def lowerCAmelCase_ ( __A : List[Any] , __A : Any=None , __A : List[str]=False ):
'''simple docstring'''
snake_case: str = DonutModel.from_pretrained(__A ).eval()
# load HuggingFace model
snake_case , snake_case: Optional[Any] = get_configs(__A )
snake_case: Optional[int] = DonutSwinModel(__A )
snake_case: Tuple = MBartForCausalLM(__A )
snake_case: Optional[Any] = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
snake_case: Optional[int] = original_model.state_dict()
snake_case: Optional[int] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# verify results on scanned document
snake_case: Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
snake_case: str = dataset['test'][0]['image'].convert('RGB' )
snake_case: Optional[int] = XLMRobertaTokenizerFast.from_pretrained(__A , from_slow=__A )
snake_case: Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
snake_case: Dict = DonutProcessor(__A , __A )
snake_case: Optional[Any] = processor(__A , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
snake_case: int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
snake_case: Optional[Any] = 'When is the coffee break?'
snake_case: Optional[int] = task_prompt.replace('{user_input}' , __A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
snake_case: Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
snake_case: str = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
snake_case: str = 's_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
snake_case: int = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
snake_case: Optional[Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
snake_case: Optional[int] = original_model.decoder.tokenizer(__A , add_special_tokens=__A , return_tensors='pt' )[
'input_ids'
]
snake_case: Any = original_model.encoder.model.patch_embed(__A )
snake_case , snake_case: Dict = model.encoder.embeddings(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
# verify encoder hidden states
snake_case: Tuple = original_model.encoder(__A )
snake_case: List[str] = model.encoder(__A ).last_hidden_state
assert torch.allclose(__A , __A , atol=1E-2 )
# verify decoder hidden states
snake_case: List[Any] = original_model(__A , __A , __A ).logits
snake_case: List[Any] = model(__A , decoder_input_ids=__A ).logits
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__UpperCAmelCase = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
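# Example invocation (the script filename and output path are placeholders / assumptions):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa-converted \
#       --push_to_hub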
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__UpperCAmelCase = datasets.logging.get_logger(__name__)
__UpperCAmelCase = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
__UpperCAmelCase = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
__UpperCAmelCase = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowerCAmelCase_ ( __A : Optional[int] , __A : Any , __A : List[str]=False , __A : Optional[Any]=False , __A : str=True , __A : Any=False , __A : Union[str, Any]="dummy_doc" ):
'''simple docstring'''
snake_case: str = {doc: key_lines}
snake_case: Union[str, Any] = {doc: sys_lines}
snake_case: Union[str, Any] = {}
snake_case: Optional[int] = 0
snake_case: Optional[int] = 0
snake_case: Optional[int] = 0
snake_case: List[str] = 0
snake_case: str = 0
snake_case: str = 0
snake_case: str = reader.get_doc_mentions(__A , key_doc_lines[doc] , __A )
key_singletons_num += singletons_num
if NP_only or min_span:
snake_case: Optional[int] = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A )
snake_case: List[str] = reader.get_doc_mentions(__A , sys_doc_lines[doc] , __A )
sys_singletons_num += singletons_num
if NP_only or min_span:
snake_case: Union[str, Any] = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A )
if remove_nested:
snake_case: Optional[int] = reader.remove_nested_coref_mentions(__A , __A )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
snake_case: str = reader.remove_nested_coref_mentions(__A , __A )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
snake_case: Optional[int] = reader.get_mention_assignments(__A , __A )
snake_case: Union[str, Any] = reader.get_mention_assignments(__A , __A )
snake_case: List[str] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def lowerCAmelCase_ ( __A : str , __A : Any , __A : List[str] , __A : Any , __A : List[str] , __A : Any , __A : int ):
'''simple docstring'''
snake_case: Dict = get_coref_infos(__A , __A , __A , __A , __A , __A )
snake_case: Union[str, Any] = {}
snake_case: List[Any] = 0
snake_case: int = 0
for name, metric in metrics:
snake_case: List[str] = evaluator.evaluate_documents(__A , __A , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
snake_case: str = (conll / 3) * 1_00
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: Union[str, Any] = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
snake_case: List[str] = line.split()[5]
if not parse_col == "-":
snake_case: List[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
snake_case: Union[str, Any] = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
snake_case: Optional[Any] = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE__ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
snake_case: Tuple = evaluate(
key_lines=SCREAMING_SNAKE_CASE__ , sys_lines=SCREAMING_SNAKE_CASE__ , metrics=SCREAMING_SNAKE_CASE__ , NP_only=SCREAMING_SNAKE_CASE__ , remove_nested=SCREAMING_SNAKE_CASE__ , keep_singletons=SCREAMING_SNAKE_CASE__ , min_span=SCREAMING_SNAKE_CASE__ , )
return score | 708 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
snake_case: Union[str, Any] = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = np.random.randn(3 , 4 )
snake_case: Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case: Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Dict = np.random.randn(3 , 4 , 5 )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Optional[int] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
snake_case: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: List[str] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
snake_case: List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: int = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = np.random.randn(1 , 3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case: List[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Tuple = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Any = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = np.random.randn(3 , 4 )
snake_case: int = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) ) | 692 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( __A : Optional[int] , __A : int=False ):
'''simple docstring'''
snake_case: str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case: str = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def lowerCAmelCase_ ( __A : Optional[Any] , __A : List[str] , __A : Tuple=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case: List[Any] = ''
else:
snake_case: Optional[Any] = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case: Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case: Union[str, Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
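# (the fused qkv weight has shape (3 * hidden_size, hidden_size): the first hidden_size rows are the
# query projection, the next hidden_size rows the key projection, and the last hidden_size rows the value projection)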
snake_case: str = in_proj_weight[
: config.hidden_size, :
]
snake_case: int = in_proj_bias[: config.hidden_size]
snake_case: List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case: int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case: List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case: List[str] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __A : Dict , __A : Union[str, Any] , __A : List[str] ):
'''simple docstring'''
snake_case: Union[str, Any] = dct.pop(__A )
snake_case: Optional[Any] = val
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case: Union[str, Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __A : Union[str, Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: str = DeiTConfig()
# all deit models have fine-tuned heads
snake_case: Union[str, Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case: Any = 10_00
snake_case: List[str] = 'huggingface/label-files'
snake_case: Dict = 'imagenet-1k-id2label.json'
snake_case: List[str] = json.load(open(hf_hub_download(__A , __A , repo_type='dataset' ) , 'r' ) )
snake_case: List[str] = {int(__A ): v for k, v in idalabel.items()}
snake_case: Tuple = idalabel
snake_case: Union[str, Any] = {v: k for k, v in idalabel.items()}
snake_case: List[Any] = int(deit_name[-6:-4] )
snake_case: Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
snake_case: List[str] = 1_92
snake_case: str = 7_68
snake_case: Union[str, Any] = 12
snake_case: int = 3
elif deit_name[9:].startswith('small' ):
snake_case: Optional[int] = 3_84
snake_case: List[str] = 15_36
snake_case: int = 12
snake_case: Optional[Any] = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
snake_case: List[Any] = 10_24
snake_case: int = 40_96
snake_case: str = 24
snake_case: Dict = 16
# load original model from timm
snake_case: List[str] = timm.create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case: Dict = timm_model.state_dict()
snake_case: Union[str, Any] = create_rename_keys(__A , __A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , __A )
# load HuggingFace model
snake_case: List[Any] = DeiTForImageClassificationWithTeacher(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case: List[Any] = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case: Tuple = DeiTImageProcessor(size=__A , crop_size=config.image_size )
snake_case: Optional[Any] = image_processor(images=prepare_img() , return_tensors='pt' )
snake_case: List[str] = encoding['pixel_values']
snake_case: List[Any] = model(__A )
snake_case: Tuple = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__UpperCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path) | 709 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "PoolFormerConfig"
# Base docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = [1, 512, 7, 7]
# Image classification docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = "tabby, tabby cat"
__UpperCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( __A : Tuple , __A : float = 0.0 , __A : bool = False ):
'''simple docstring'''
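# This implements stochastic depth ("drop path"): during training each sample's residual branch is
# zeroed out with probability drop_prob, and surviving samples are rescaled by 1 / keep_prob so the
# expected output stays unchanged.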
if drop_prob == 0.0 or not training:
return input
snake_case: Union[str, Any] = 1 - drop_prob
snake_case: List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case: List[Any] = keep_prob + torch.rand(__A , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case: Any = input.div(__A ) * random_tensor
return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = drop_prob
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training )
def _UpperCamelCase ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: List[str] = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride)
snake_case: Union[str, Any] = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding)
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.projection(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.norm(SCREAMING_SNAKE_CASE__ )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
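# Average pooling acts as the token mixer in PoolFormer; the input is subtracted here because the
# enclosing PoolFormerLayer adds it back through its residual connection.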
return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: str = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = ACTaFN[config.hidden_act]
else:
snake_case: int = config.hidden_act
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.act_fn(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.drop(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.drop(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = PoolFormerPooling(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
# Useful for training neural nets
snake_case: Union[str, Any] = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity()
snake_case: Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
snake_case: Any = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.use_layer_scale:
snake_case: str = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: str = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = ()
snake_case: Dict = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: Any = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = (output,) + outputs
return outputs
else:
snake_case: Optional[Any] = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) )
# First residual connection
snake_case: Union[str, Any] = pooling_output + hidden_states
snake_case: List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: Dict = hidden_states + layer_output
snake_case: Optional[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = config
# stochastic depth decay rule
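# (the drop path probability grows linearly with depth, from 0 at the first layer up to config.drop_path_rate at the last)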
snake_case: List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: List[Any] = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
# Transformer blocks
snake_case: str = []
snake_case: int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
snake_case: str = () if output_hidden_states else None
snake_case: Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: Dict = layers
# Get patch embeddings from hidden_states
snake_case: int = embedding_layer(SCREAMING_SNAKE_CASE__ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = blk(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = layer_outputs[0]
if output_hidden_states:
snake_case: List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = "poolformer"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = config
snake_case: Tuple = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: List[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Any = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.dense(SCREAMING_SNAKE_CASE__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = config.num_labels
snake_case: str = PoolFormerModel(SCREAMING_SNAKE_CASE__ )
# Final norm
snake_case: int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case: Optional[Any] = self.poolformer(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: Any = outputs[0]
snake_case: str = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) )
snake_case: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict = 'single_label_classification'
else:
snake_case: List[str] = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Union[str, Any] = MSELoss()
if self.num_labels == 1:
snake_case: List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: int = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
snake_case: Union[str, Any] = CrossEntropyLoss()
snake_case: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int = BCEWithLogitsLoss()
snake_case: Optional[int] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
snake_case: str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states ) | 692 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: str = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
snake_case: Optional[Any] = emb.weight.shape
snake_case: Optional[Any] = nn.Linear(__A , __A , bias=__A )
snake_case: List[str] = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
snake_case: Tuple = torch.load(__A , map_location='cpu' )
snake_case: Optional[int] = mam_aaa['args'] or mam_aaa['cfg']['model']
snake_case: Union[str, Any] = mam_aaa['model']
remove_ignore_keys_(__A )
snake_case: Optional[Any] = state_dict['encoder.embed_tokens.weight'].shape[0]
snake_case: str = MaMaaaConfig(
vocab_size=__A , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
snake_case: Optional[int] = state_dict['decoder.embed_tokens.weight']
snake_case: Union[str, Any] = MaMaaaForConditionalGeneration(__A )
model.model.load_state_dict(__A , strict=__A )
snake_case: Optional[int] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 710 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase_ ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
'''simple docstring'''
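# Relax every outgoing edge of v for the current search direction; whenever the neighbour has already
# been settled by the opposite search, try to improve the best known forward + backward path length.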
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case: Any = cst_fwd.get(__A , np.inf )
snake_case: int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case: Union[str, Any] = new_cost_f
snake_case: Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case: List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[Any] = -1
snake_case: Any = set()
snake_case: str = set()
snake_case: int = {source: 0}
snake_case: Dict = {destination: 0}
snake_case: int = {source: None}
snake_case: Union[str, Any] = {destination: None}
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case , snake_case: List[str] = queue_forward.get()
visited_forward.add(__A )
snake_case , snake_case: int = queue_backward.get()
visited_backward.add(__A )
snake_case: str = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
snake_case: Optional[Any] = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
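# stop once the settled forward and backward distances together reach the best connecting path found so far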
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case: Any = shortest_distance
return shortest_path_distance
__UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
__UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = [True] * 1_000_001
__UpperCAmelCase = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
__UpperCAmelCase = False
i += 1
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
return seive[n]
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
return any(digit in '02468' for digit in str(__A ) )
def lowerCAmelCase_ ( __A : int = 1_00_00_00 ):
'''simple docstring'''
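# A circular prime stays prime under every rotation of its digits, e.g. 197 -> 971 -> 719 are all prime.
# Numbers containing an even digit are skipped, since some rotation would end in that digit and be divisible by 2.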
snake_case: Optional[int] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__A ) and not contains_an_even_digit(__A ):
snake_case: Optional[int] = str(__A )
snake_case: List[Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__A ) )]
if all(is_prime(__A ) for i in list_nums ):
result.append(__A )
return result
def lowerCAmelCase_ ( ):
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }') | 711 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,) | 692 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 712 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
return getitem, k
def lowerCAmelCase_ ( __A : Any , __A : Optional[int] ):
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
return delitem, k
def lowerCAmelCase_ ( __A : str , __A : int , *__A : Tuple ):
'''simple docstring'''
try:
return fun(__A , *__A ), None
except Exception as e:
return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: List[Any] = HashMap(initial_block_size=4 )
snake_case: List[Any] = {}
for _, (fun, *args) in enumerate(__A ):
snake_case , snake_case: Optional[int] = _run_operation(__A , __A , *__A )
snake_case , snake_case: str = _run_operation(__A , __A , *__A )
assert my_res == py_res
assert str(__A ) == str(__A )
assert set(__A ) == set(__A )
assert len(__A ) == len(__A )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
'''simple docstring'''
def is_public(__A : str ) -> bool:
return not name.startswith('_' )
snake_case: Dict = {name for name in dir({} ) if is_public(__A )}
snake_case: List[str] = {name for name in dir(HashMap() ) if is_public(__A )}
assert dict_public_names > hash_public_names | 692 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = TransfoXLTokenizer
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: Dict = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = '<unk> UNwanted , running'
snake_case: int = '<unk> unwanted, running'
return input_text, output_text
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [0, 4, 8, 7] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
snake_case: Optional[int] = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: List[Any] = len(SCREAMING_SNAKE_CASE__ )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' ) | 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __A : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ):
'''simple docstring'''
for attribute in key.split('.' ):
snake_case: List[str] = getattr(__A , __A )
if weight_type is not None:
snake_case: Optional[int] = getattr(__A , __A ).shape
else:
snake_case: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case: Optional[int] = value
elif weight_type == "weight_g":
snake_case: List[str] = value
elif weight_type == "weight_v":
snake_case: Dict = value
elif weight_type == "bias":
snake_case: Optional[Any] = value
else:
snake_case: int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A : List[Any] , __A : List[str] ):
'''simple docstring'''
snake_case: List[Any] = []
snake_case: List[Any] = fairseq_model.state_dict()
snake_case: Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case: Dict = None
for name, value in fairseq_dict.items():
snake_case: Tuple = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
snake_case: List[Any] = True
elif name.split('.' )[0] == "proj":
snake_case: List[Any] = fairseq_model.proj
snake_case: int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case: int = True
if "*" in mapped_key:
snake_case: List[str] = name.split(__A )[0].split('.' )[-2]
snake_case: Dict = mapped_key.replace('*' , __A )
if "weight_g" in name:
snake_case: Tuple = 'weight_g'
elif "weight_v" in name:
snake_case: int = 'weight_v'
elif "bias" in name:
snake_case: Tuple = 'bias'
elif "weight" in name:
snake_case: List[Any] = 'weight'
else:
snake_case: Any = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( __A : List[str] , __A : List[Any] , __A : int , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: int = full_name.split('conv_layers.' )[-1]
snake_case: Tuple = name.split('.' )
snake_case: Any = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case: Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case: int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case: Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case: str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
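# Creates a standalone linear output layer whose weights are copied from the given embedding matrix.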
snake_case , snake_case: List[Any] = emb.weight.shape
snake_case: Optional[int] = nn.Linear(__A , __A , bias=__A )
snake_case: Any = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
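# Builds a tokenizer vocab from a fairseq dict file: four special tokens first, then one entry per line of the file.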
with open(__A , 'r' , encoding='utf-8' ) as f:
snake_case: List[Any] = f.readlines()
snake_case: Any = [line.split(' ' )[0] for line in lines]
snake_case: int = len(__A )
snake_case: Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Any , __A : List[Any] , __A : int , __A : str , ):
'''simple docstring'''
snake_case: Union[str, Any] = WavaVecaConfig.from_pretrained(__A )
snake_case: str = SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
snake_case: List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
snake_case , snake_case , snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case: List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case: Optional[Any] = WavaVecaModel(__A )
snake_case: Any = recursively_load_weights_wavaveca(model.encoder , __A )
snake_case: Union[str, Any] = SpeechaTextaForCausalLM(__A )
snake_case , snake_case: Optional[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case: str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case: int = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
snake_case: List[Any] = False
# add projection layer
snake_case: Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case: Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case: List[Any] = create_vocab_dict(__A )
with open(os.path.join(__A , 'vocab.json' ) , 'w' ) as fp:
json.dump(__A , __A )
snake_case: Union[str, Any] = SpeechaTextaTokenizer(os.path.join(__A , 'vocab.json' ) )
tokenizer.save_pretrained(__A )
snake_case: Tuple = hf_wavavec.config.to_dict()
snake_case: int = tokenizer.pad_token_id
snake_case: Dict = tokenizer.bos_token_id
snake_case: Optional[int] = tokenizer.eos_token_id
snake_case: Dict = 'speech_to_text_2'
snake_case: Optional[Any] = 'wav2vec2'
snake_case: Tuple = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 692 | 0 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case: Any = psutil.Process()
snake_case: Any = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = -1
while True:
snake_case: str = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = True
snake_case: Union[str, Any] = threading.Thread(target=self.peak_monitor )
snake_case: List[Any] = True
self.thread.start()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = False
self.thread.join()
return self.cpu_memory_peak
__UpperCAmelCase = PeakCPUMemory()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Any = {'time': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case: Dict = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case: Union[str, Any] = torch.cuda.memory_allocated(__A )
torch.cuda.reset_peak_memory_stats()
return measures
def lowerCAmelCase_ ( __A : List[Any] ):
'''simple docstring'''
snake_case: str = {'time': time.time() - start_measures['time']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case: Optional[Any] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
snake_case: int = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case: List[Any] = (torch.cuda.memory_allocated(__A ) - start_measures[str(__A )]) / 2**20
snake_case: str = (torch.cuda.max_memory_allocated(__A ) - start_measures[str(__A )]) / 2**20
return measures
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Dict ):
'''simple docstring'''
print(f"""{description}:""" )
print(f"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(__A )]:.2f}MiB""" )
snake_case: Optional[Any] = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 714 |
'''simple docstring'''
def lowerCAmelCase_ ( __A : int = 1_00 ):
'''simple docstring'''
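# Difference between the square of the sum and the sum of the squares of the first n natural numbers.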
snake_case: List[str] = n * (n + 1) * (2 * n + 1) / 6
snake_case: List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }') | 692 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__UpperCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__UpperCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCAmelCase_ ( __A : Dict , __A : List[Any] ):
'''simple docstring'''
for tf_name, hf_name in patterns:
snake_case: List[Any] = k.replace(__A , __A )
return k
def lowerCAmelCase_ ( __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[int] = BigBirdPegasusConfig(**__A )
snake_case: List[Any] = BigBirdPegasusForConditionalGeneration(__A )
snake_case: Any = torch_model.state_dict()
snake_case: Any = {}
# separating decoder weights
snake_case: Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
snake_case: Any = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
snake_case: List[str] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Any = DECODER_PATTERNS
snake_case: int = rename_state_dict_key(__A , __A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: Optional[Any] = v.T
snake_case: Any = torch.from_numpy(__A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
snake_case: List[Any] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Union[str, Any] = REMAINING_PATTERNS
snake_case: str = rename_state_dict_key(__A , __A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: int = v.T
snake_case: Any = torch.from_numpy(__A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case: str = mapping['model.embed_positions.weight']
snake_case: Any = mapping.pop('model.embed_positions.weight' )
snake_case: Union[str, Any] = torch_model.load_state_dict(__A , strict=__A )
snake_case: Optional[int] = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
snake_case: Tuple = tf.train.list_variables(__A )
snake_case: str = {}
snake_case: List[str] = ['global_step']
for name, shape in tqdm(__A , desc='converting tf checkpoint to dict' ):
snake_case: str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case: Any = tf.train.load_variable(__A , __A )
snake_case: Optional[int] = array
return tf_weights
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict ):
'''simple docstring'''
snake_case: int = get_tf_weights_as_numpy(__A )
snake_case: int = convert_bigbird_pegasus(__A , __A )
torch_model.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 715 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__UpperCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__UpperCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCAmelCase_ ( __A : Dict , __A : List[Any] ):
'''simple docstring'''
for tf_name, hf_name in patterns:
snake_case: List[Any] = k.replace(__A , __A )
return k
def lowerCAmelCase_ ( __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[int] = BigBirdPegasusConfig(**__A )
snake_case: List[Any] = BigBirdPegasusForConditionalGeneration(__A )
snake_case: Any = torch_model.state_dict()
snake_case: Any = {}
# separating decoder weights
snake_case: Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
snake_case: Any = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
snake_case: List[str] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Any = DECODER_PATTERNS
snake_case: int = rename_state_dict_key(__A , __A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: Optional[Any] = v.T
snake_case: Any = torch.from_numpy(__A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
snake_case: List[Any] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Union[str, Any] = REMAINING_PATTERNS
snake_case: str = rename_state_dict_key(__A , __A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
snake_case: int = v.T
snake_case: Any = torch.from_numpy(__A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case: str = mapping['model.embed_positions.weight']
snake_case: Any = mapping.pop('model.embed_positions.weight' )
snake_case , snake_case: Union[str, Any] = torch_model.load_state_dict(__A , strict=__A )
snake_case: Optional[int] = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
snake_case: Tuple = tf.train.list_variables(__A )
snake_case: str = {}
snake_case: List[str] = ['global_step']
for name, shape in tqdm(__A , desc='converting tf checkpoint to dict' ):
snake_case: str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case: Any = tf.train.load_variable(__A , __A )
snake_case: Optional[int] = array
return tf_weights
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict ):
'''simple docstring'''
snake_case: int = get_tf_weights_as_numpy(__A )
snake_case: int = convert_bigbird_pegasus(__A , __A )
torch_model.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 692 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: list[list[Edge]] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
snake_case: List[Any] = size
def __getitem__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return self._size
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = deque([start_vertex] )
snake_case: list[int | None] = [None] * self.size
snake_case: int = 0
while queue:
snake_case: Optional[int] = queue.popleft()
snake_case: Dict = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
snake_case: str = current_distance + edge.weight
snake_case: List[Any] = distances[edge.destination_vertex]
if (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and new_distance >= dest_vertex_distance
):
continue
snake_case: Dict = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 716 |
'''simple docstring'''
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
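# Longest path in a DAG using Kahn's algorithm: process vertices in topological order and relax the distance to each successor.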
snake_case: str = [0] * len(__A )
snake_case: Tuple = []
snake_case: Tuple = [1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
snake_case: int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case: Any = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph) | 692 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "trocr"
__UpperCamelCase = ["past_key_values"]
__UpperCamelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=5_02_65 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=40_96 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: str = vocab_size
snake_case: Dict = d_model
snake_case: Optional[int] = decoder_layers
snake_case: Union[str, Any] = decoder_attention_heads
snake_case: int = decoder_ffn_dim
snake_case: Tuple = activation_function
snake_case: int = max_position_embeddings
snake_case: Any = dropout
snake_case: List[str] = attention_dropout
snake_case: Optional[int] = activation_dropout
snake_case: Dict = init_std
snake_case: Optional[Any] = decoder_layerdrop
snake_case: Tuple = use_cache
snake_case: Tuple = scale_embedding
snake_case: List[Any] = use_learned_position_embeddings
snake_case: List[Any] = layernorm_embedding
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) | 717 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = tempfile.mkdtemp()
snake_case: Optional[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case: Optional[int] = {
'do_resize': True,
'size': {'height': 2_24, 'width': 2_24},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: Union[str, Any] = self.get_rust_tokenizer()
snake_case: Union[str, Any] = self.get_image_processor()
snake_case: List[str] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case: List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
snake_case: Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_image_processor()
snake_case: Tuple = self.get_tokenizer()
snake_case: Optional[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.prepare_image_inputs()
snake_case: List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: Optional[int] = self.get_tokenizer()
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Tuple = self.prepare_image_inputs()
snake_case: Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case: int = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = 'Alexandra,T-shirt的价格是15便士。'
snake_case: List[Any] = self.prepare_image_inputs()
snake_case: Dict = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 692 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 50 , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: int = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE__ , )
snake_case: str = image.to(self.device )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case: List[str] = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case: Optional[int] = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
snake_case: int = (image / 2 + 0.5).clamp(0 , 1 )
snake_case: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case: int = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ ), "This is a local test" | 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "swinv2"
__UpperCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: int = image_size
snake_case: Union[str, Any] = patch_size
snake_case: List[str] = num_channels
snake_case: Tuple = embed_dim
snake_case: str = depths
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = num_heads
snake_case: Optional[int] = window_size
snake_case: Any = mlp_ratio
snake_case: Optional[int] = qkv_bias
snake_case: Union[str, Any] = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: Dict = drop_path_rate
snake_case: List[str] = hidden_act
snake_case: int = use_absolute_embeddings
snake_case: Any = layer_norm_eps
snake_case: Dict = initializer_range
snake_case: List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case: Tuple = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
snake_case: Union[str, Any] = (0, 0, 0, 0) | 692 | 0 |
'''simple docstring'''
import sys
__UpperCAmelCase = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
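# Returns the product of the digits of a numeric string.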
snake_case: Tuple = 1
for digit in s:
product *= int(__A )
return product
def lowerCAmelCase_ ( __A : str = N ):
'''simple docstring'''
snake_case: List[str] = -sys.maxsize - 1
snake_case: int = n[:13]
snake_case: Union[str, Any] = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
snake_case: Optional[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
snake_case: Dict = max(__A , str_eval(__A ) )
snake_case: Optional[int] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
| 719 |
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase = os.path.join(git_repo_path, "src", "transformers")
__UpperCAmelCase = "\n{0} = None\n"
__UpperCAmelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__UpperCAmelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tokenizers' )
snake_case: List[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tensorflow_text' )
snake_case: int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers' )
snake_case: Optional[Any] = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tensorflow_text' )
snake_case: Dict = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers_and_vision' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE__ )
self.assertIn('tensorflow_text' , SCREAMING_SNAKE_CASE__ )
self.assertIn('sentencepiece_and_tokenizers' , SCREAMING_SNAKE_CASE__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '\nCONSTANT = None\n' )
snake_case: Any = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case: Optional[int] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case: Tuple = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case: Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE__ ) | 692 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "roformer"
def __init__( self , SCREAMING_SNAKE_CASE__=5_00_00 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=15_36 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Any = vocab_size
snake_case: List[Any] = hidden_size if embedding_size is None else embedding_size
snake_case: str = hidden_size
snake_case: List[Any] = num_hidden_layers
snake_case: Union[str, Any] = num_attention_heads
snake_case: Optional[Any] = hidden_act
snake_case: List[str] = intermediate_size
snake_case: Any = hidden_dropout_prob
snake_case: str = attention_probs_dropout_prob
snake_case: Any = max_position_embeddings
snake_case: int = type_vocab_size
snake_case: str = initializer_range
snake_case: Tuple = layer_norm_eps
snake_case: Dict = rotary_value
snake_case: str = use_cache
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case: Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case: Union[str, Any] = {0: 'batch', 1: 'sequence'}
snake_case: Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 720 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = question_encoder
snake_case: Union[str, Any] = generator
snake_case: Optional[int] = self.question_encoder
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'question_encoder_tokenizer' )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'generator_tokenizer' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE__ )
@classmethod
def _UpperCamelCase ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case: int = kwargs.pop('config' , SCREAMING_SNAKE_CASE__ )
if config is None:
snake_case: str = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
snake_case: Dict = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
def __call__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.current_tokenizer(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.question_encoder
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.generator
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "longest" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , SCREAMING_SNAKE_CASE__ , )
if max_length is None:
snake_case: Optional[Any] = self.current_tokenizer.model_max_length
snake_case: int = self(
SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case: Any = self.current_tokenizer.model_max_length
snake_case: List[str] = self(
text_target=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: Dict = labels['input_ids']
return model_inputs | 692 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) | 721 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case , *snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__A , __A , clobber=__A )
with pytest.warns(__A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__A ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
) | 692 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "camembert"
def __init__( self , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = vocab_size
snake_case: List[Any] = hidden_size
snake_case: str = num_hidden_layers
snake_case: Tuple = num_attention_heads
snake_case: int = hidden_act
snake_case: Optional[int] = intermediate_size
snake_case: Any = hidden_dropout_prob
snake_case: int = attention_probs_dropout_prob
snake_case: Tuple = max_position_embeddings
snake_case: List[Any] = type_vocab_size
snake_case: Tuple = initializer_range
snake_case: List[Any] = layer_norm_eps
snake_case: Union[str, Any] = position_embedding_type
snake_case: int = use_cache
snake_case: Dict = classifier_dropout
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case: str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case: List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 700 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__UpperCamelCase = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the training data."} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
__UpperCamelCase = field(default=snake_case , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
snake_case: str = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
snake_case: Optional[Any] = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case: str = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case: Tuple = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case: List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case: Optional[int] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case: Tuple = data_args.train_file.split('.' )[-1]
snake_case: Union[str, Any] = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case: Union[str, Any] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
snake_case: List[Any] = load_dataset('csv' , data_files=__A , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case: Optional[Any] = load_dataset('json' , data_files=__A , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case: Tuple = raw_datasets['train'].features['label'].names
snake_case: List[str] = len(__A )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case: Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case: List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__A , )
snake_case: Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case: int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case: Union[str, Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case: Optional[Any] = {'Refused': 0, 'Entailed': 1}
snake_case: List[Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case: List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__A : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(__A : Dict ):
snake_case: str = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
snake_case: List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
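        # Illustrative example (the values below are made up, not taken from the dataset): a
        # `table_text` string such as "name#score\nalice#1\nbob#2" is split on newlines and '#',
        # the first row becomes the column header, and the result is a DataFrame with columns
        # ["name", "score"] and rows [["alice", "1"], ["bob", "2"]] that the TAPEX tokenizer can
        # linearize together with the statement.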
snake_case: str = examples['statement']
snake_case: int = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
snake_case: List[Any] = tokenizer(__A , __A , padding=__A , max_length=__A , truncation=__A )
snake_case: List[Any] = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
snake_case: int = raw_datasets.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case: List[str] = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case: Tuple = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case: Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case: Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
snake_case: str = raw_datasets['test']
if data_args.max_predict_samples is not None:
snake_case: List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__A : EvalPrediction ):
snake_case: int = p.predictions[0] if isinstance(p.predictions , __A ) else p.predictions
snake_case: List[str] = np.argmax(__A , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case: str = default_data_collator
elif training_args.fpaa:
snake_case: List[str] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
snake_case: List[Any] = None
# Initialize our Trainer
snake_case: List[str] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
snake_case: Optional[int] = None
if training_args.resume_from_checkpoint is not None:
snake_case: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case: Optional[Any] = last_checkpoint
snake_case: Union[str, Any] = trainer.train(resume_from_checkpoint=__A )
snake_case: List[Any] = train_result.metrics
snake_case: List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
snake_case: Optional[Any] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __A )
trainer.save_metrics('train' , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case: Dict = trainer.evaluate(eval_dataset=__A )
snake_case: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
snake_case: Dict = min(__A , len(__A ) )
trainer.log_metrics('eval' , __A )
trainer.save_metrics('eval' , __A )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
snake_case: Optional[int] = predict_dataset.remove_columns('label' )
snake_case: str = trainer.predict(__A , metric_key_prefix='predict' ).predictions
snake_case: Any = np.argmax(__A , axis=1 )
snake_case: int = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__A ):
snake_case: int = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case: Optional[int] = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 692 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCAmelCase_ ( __A : Union[str, Any] ):
'''simple docstring'''
if (
        (cp >= 0X4E_00 and cp <= 0X9F_FF) # CJK Unified Ideographs
        or (cp >= 0X34_00 and cp <= 0X4D_BF) # CJK Unified Ideographs Extension A
        or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) # CJK Unified Ideographs Extension B
        or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) # CJK Unified Ideographs Extension C
        or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) # CJK Unified Ideographs Extension D
        or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) # CJK Unified Ideographs Extension E
        or (cp >= 0XF9_00 and cp <= 0XFA_FF) # CJK Compatibility Ideographs
        or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
for char in word:
snake_case: Any = ord(__A )
if not _is_chinese_char(__A ):
return 0
return 1
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
snake_case: int = set()
for token in tokens:
snake_case: Union[str, Any] = len(__A ) > 1 and is_chinese(__A )
if chinese_word:
word_set.add(__A )
snake_case: Optional[Any] = list(__A )
return word_list
def lowerCAmelCase_ ( __A : List[str] , __A : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case: Dict = max([len(__A ) for w in chinese_word_set] )
snake_case: List[Any] = bert_tokens
snake_case: Union[str, Any] = 0, len(__A )
while start < end:
snake_case: str = True
if is_chinese(bert_word[start] ):
snake_case: Tuple = min(end - start , __A )
for i in range(__A , 1 , -1 ):
snake_case: str = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case: List[str] = '##' + bert_word[j]
snake_case: List[str] = start + i
snake_case: Optional[int] = False
break
if single_word:
start += 1
return bert_word
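# Illustrative example (hypothetical tokens, not from the script): given BERT tokens
# ["今", "天", "天", "气"] and the LTP word set {"今天", "天气"}, the function above returns
# ["今", "##天", "天", "##气"]. Characters that continue a segmented word receive the "##"
# prefix so that whole-word masking can later treat each word as a single unit.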
def lowerCAmelCase_ ( __A : List[str] , __A : LTP , __A : BertTokenizer ):
'''simple docstring'''
snake_case: str = []
for i in range(0 , len(__A ) , 1_00 ):
snake_case: Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
snake_case: Tuple = [get_chinese_word(__A ) for r in res]
ltp_res.extend(__A )
assert len(__A ) == len(__A )
snake_case: Tuple = []
for i in range(0 , len(__A ) , 1_00 ):
snake_case: Optional[int] = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__A , truncation=__A , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(__A ) == len(__A )
snake_case: Dict = []
for input_ids, chinese_word in zip(__A , __A ):
snake_case: Union[str, Any] = []
for id in input_ids:
snake_case: List[Any] = bert_tokenizer._convert_id_to_token(__A )
input_tokens.append(__A )
snake_case: str = add_sub_symbol(__A , __A )
snake_case: Optional[int] = []
        # We only save the positions of Chinese sub-word tokens that start with "##", which means they are part of a whole word.
for i, token in enumerate(__A ):
if token[:2] == "##":
snake_case: List[str] = token[2:]
# save chinese tokens' pos
if len(__A ) == 1 and _is_chinese_char(ord(__A ) ):
ref_id.append(__A )
ref_ids.append(__A )
assert len(__A ) == len(__A )
return ref_ids
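# Note (added for clarity): each entry of the returned `ref_ids` lists the positions inside
# `input_ids` whose tokens are single Chinese characters marked above as word continuations
# ("##x"). A masked-LM script can load this reference file to mask whole words instead of
# individual characters; the usual consumer is a whole-word-masking pretraining script.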
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
snake_case: Union[str, Any] = f.readlines()
snake_case: List[Any] = [line.strip() for line in data if len(__A ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case: str = LTP(args.ltp ) # faster in GPU device
snake_case: str = BertTokenizer.from_pretrained(args.bert )
snake_case: str = prepare_ref(__A , __A , __A )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
snake_case: Dict = [json.dumps(__A ) + '\n' for ref in ref_ids]
f.writelines(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
__UpperCAmelCase = parser.parse_args()
main(args) | 701 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
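# Illustrative checks (added for clarity): the primality test above returns True for 2, 3 and 29
# (29 = 6 * 5 - 1) and False for 1, 25 (divisible by 5) and 87 (divisible by 3).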
def lowerCAmelCase_ ( __A : float = 0.1 ):
'''simple docstring'''
snake_case: Optional[int] = 3
snake_case: int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCAmelCase = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__UpperCAmelCase = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: Union[str, Any] = None
# source code of `config_class`
snake_case: Optional[int] = inspect.getsource(__A )
snake_case: str = _re_checkpoint.findall(__A )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
snake_case: Optional[int] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case: int = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
snake_case: List[str] = ckpt_name
break
return checkpoint
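# Illustrative example (added for clarity): a config docstring containing the markdown link
# `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` makes the function above
# return "bert-base-uncased", because the link matches the checkpoint name exactly; a missing
# or mismatched link leaves `checkpoint` as None.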
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: int = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case: str = get_checkpoint_from_config_class(__A )
snake_case: Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__A )
if len(__A ) > 0:
snake_case: Any = '\n'.join(sorted(__A ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 702 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
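        # Note (added for clarity): ByT5 is byte-level, so every UTF-8 byte b is mapped to the id
        # b + 3 (ids 0/1/2 are reserved for pad/</s>/<unk>). That is why ord("U") == 85 encodes to
        # 88 above, the three bytes of "€" (0xE2 0x82 0xAC) become 229/133/175, and the trailing 1
        # is the </s> token appended by the tokenizer.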
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 692 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = ["model.decoder.embed_positions.weights"]
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "emb" in name:
snake_case: Optional[int] = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
snake_case: Any = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
snake_case: Optional[Any] = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
snake_case: Tuple = name.replace('linear1' , 'fc1' )
if "linear2" in name:
snake_case: int = name.replace('linear2' , 'fc2' )
if "norm1" in name:
snake_case: List[Any] = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
snake_case: List[Any] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
snake_case: int = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
snake_case: Optional[int] = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
snake_case: int = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case: Optional[int] = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def lowerCAmelCase_ ( __A : OrderedDict , __A : int ):
'''simple docstring'''
snake_case: Tuple = list(state_dict.keys() )
snake_case: Union[str, Any] = {}
for key in keys:
snake_case: Tuple = state_dict.pop(__A )
snake_case: Union[str, Any] = rename_keys(__A )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case: str = val[:hidden_size, :]
snake_case: Dict = val[hidden_size : 2 * hidden_size, :]
snake_case: Any = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case: int = val
else:
snake_case: Any = val
return state_dict, enc_dec_proj_state_dict
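# Note (added for clarity): the original checkpoint stores the self-attention projections as a
# single fused `in_proj_weight` of shape (3 * hidden_size, hidden_size); the slicing above splits
# it into query, key and value weights of shape (hidden_size, hidden_size) each, which is the
# layout the Transformers attention modules expect.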
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
if checkpoint == "small":
# default config values
snake_case: Dict = 10_24
snake_case: Tuple = 24
snake_case: str = 16
elif checkpoint == "medium":
snake_case: str = 15_36
snake_case: Dict = 48
snake_case: Optional[Any] = 24
elif checkpoint == "large":
snake_case: List[str] = 20_48
snake_case: int = 48
snake_case: List[str] = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case: Union[str, Any] = MusicgenDecoderConfig(
hidden_size=__A , ffn_dim=hidden_size * 4 , num_hidden_layers=__A , num_attention_heads=__A , )
return config
@torch.no_grad()
def lowerCAmelCase_ ( __A : str , __A : List[Any]=None , __A : Dict=None , __A : List[Any]="cpu" ):
'''simple docstring'''
snake_case: int = MusicGen.get_pretrained(__A , device=__A )
snake_case: Dict = decoder_config_from_checkpoint(__A )
snake_case: List[str] = fairseq_model.lm.state_dict()
snake_case: List[Any] = rename_state_dict(
__A , hidden_size=decoder_config.hidden_size )
snake_case: List[Any] = TaEncoderModel.from_pretrained('t5-base' )
snake_case: List[Any] = EncodecModel.from_pretrained('facebook/encodec_32khz' )
snake_case: Tuple = MusicgenForCausalLM(__A ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case: List[Any] = decoder.load_state_dict(__A , strict=__A )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__A )
if len(__A ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__A ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case: Optional[Any] = MusicgenForConditionalGeneration(text_encoder=__A , audio_encoder=__A , decoder=__A )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__A )
# check we can do a forward pass
snake_case: Optional[int] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case: List[str] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case: Union[str, Any] = model(input_ids=__A , decoder_input_ids=__A ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
snake_case: Union[str, Any] = AutoTokenizer.from_pretrained('t5-base' )
snake_case: Optional[Any] = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
snake_case: Optional[Any] = MusicgenProcessor(feature_extractor=__A , tokenizer=__A )
# set the appropriate bos/pad token ids
snake_case: str = 20_48
snake_case: Dict = 20_48
# set other default generation config params
snake_case: str = int(30 * audio_encoder.config.frame_rate )
snake_case: Union[str, Any] = True
snake_case: Union[str, Any] = 3.0
if pytorch_dump_folder is not None:
Path(__A ).mkdir(exist_ok=__A )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__A )
processor.push_to_hub(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__UpperCAmelCase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub) | 703 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = only_cross_attention
snake_case: Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
snake_case: Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case: List[str] = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case: str = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case: Tuple = (
AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none
else:
snake_case: int = None
snake_case: Tuple = None
# 3. Feed-forward
snake_case: Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ )
# let chunk size default to None
snake_case: Any = None
snake_case: Any = 0
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = chunk_size
snake_case: str = dim
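        # Note (added for clarity): with e.g. chunk_size=1024 and 4096 positions along `dim`, the
        # feed-forward in `forward` below is applied to four slices of 1024 positions each and the
        # results are concatenated, lowering peak activation memory; the size along `dim` must be
        # divisible by chunk_size or `forward` raises a ValueError.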
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
snake_case: Optional[int] = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case , snake_case , snake_case , snake_case , snake_case: int = self.norma(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype )
else:
snake_case: List[str] = self.norma(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case: List[str] = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.use_ada_layer_norm_zero:
snake_case: Tuple = gate_msa.unsqueeze(1 ) * attn_output
snake_case: List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case: Dict = (
self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: List[str] = attn_output + hidden_states
# 3. Feed-forward
snake_case: str = self.norma(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
snake_case: List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case: Optional[Any] = torch.cat(
[self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case: int = self.ff(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case: Tuple = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: int = int(dim * mult )
snake_case: Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case: int = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if activation_fn == "gelu-approximate":
snake_case: Optional[Any] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate='tanh' )
elif activation_fn == "geglu":
snake_case: List[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif activation_fn == "geglu-approximate":
snake_case: Optional[int] = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.ModuleList([] )
# project in
self.net.append(SCREAMING_SNAKE_CASE__ )
# project dropout
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
# project out
self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for module in self.net:
snake_case: Optional[int] = module(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ):
'''simple docstring'''
super().__init__()
snake_case: Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = approximate
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.proj(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.gelu(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
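    # Note (added for clarity): the forward pass below projects to 2 * dim_out features and splits
    # them into a hidden half and a gate half; the output is hidden * GELU(gate), i.e. the GEGLU
    # gating from the GLU-variants family of activations.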
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case , snake_case: int = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
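    # Note (added for clarity): x * sigmoid(1.702 * x) is the sigmoid approximation of GELU, so
    # this module applies a linear projection followed by an approximate GELU in a single step.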
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = self.proj(SCREAMING_SNAKE_CASE__ )
return x * torch.sigmoid(1.7_02 * x )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Optional[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = nn.SiLU()
snake_case: Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 )
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case: Dict = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 )
snake_case: str = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.SiLU()
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
snake_case: int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case: str = emb.chunk(6 , dim=1 )
snake_case: Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ):
'''simple docstring'''
super().__init__()
snake_case: str = num_groups
snake_case: str = eps
if act_fn is None:
snake_case: Dict = None
else:
snake_case: List[str] = get_activation(SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.act:
snake_case: Optional[Any] = self.act(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.linear(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = emb[:, :, None, None]
snake_case , snake_case: List[Any] = emb.chunk(2 , dim=1 )
snake_case: Any = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps )
snake_case: Optional[int] = x * (1 + scale) + shift
return x | 692 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = RoCBertTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = filter_non_english
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
snake_case: List[Any] = {}
snake_case: List[str] = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = i
snake_case: Union[str, Any] = i
snake_case: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: Dict = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
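        # in the toy vocab built in setUp, the word-shape and word-pronunciation maps mirror
        # the token vocabulary, so all three id lookups above return the same indices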
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
snake_case: Union[str, Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: str = i
snake_case: Optional[int] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
snake_case: int = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _UpperCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case: List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
snake_case: Optional[int] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
snake_case: int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ['的', '人', '有']
snake_case: Any = ''.join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = True
snake_case: List[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = False
snake_case: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: int = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case: Union[str, Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: int = tokenizer.encode('你好' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Any = tokenizer.encode('你是谁' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Dict = '你好,你是谁'
snake_case: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
for i in range(0 , __A ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
for i in range(__A , 0 , -1 ):
for _ in range(__A , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__A ) # upper half
reverse_floyd(__A ) # lower half
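# Expected output sketch for pretty_print(3) (an illustration, assuming the helpers above
# are called with the same n; trailing spaces omitted):
#   *
#  * *
# * * *
# * * *
#  * *
#   *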
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
__UpperCAmelCase = 1
while K:
__UpperCAmelCase = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
__UpperCAmelCase = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...") | 705 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__UpperCAmelCase = 6378137.0
__UpperCAmelCase = 6356752.314245
__UpperCAmelCase = 6_378_137
def lowerCAmelCase_ ( __A : float , __A : float , __A : float , __A : float ):
'''simple docstring'''
snake_case: Optional[Any] = (AXIS_A - AXIS_B) / AXIS_A
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: Tuple = radians(__A )
snake_case: Tuple = radians(__A )
# Equation
snake_case: List[Any] = sin((phi_a - phi_a) / 2 )
snake_case: Dict = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
snake_case: Union[str, Any] = sqrt(sin_sq_phi + (cos(__A ) * cos(__A ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__A )
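# A rough worked example (an illustration, not part of the original file), assuming the
# arguments are (latitude_1, longitude_1, latitude_2, longitude_2) in degrees: two points
# on the equator one degree of longitude apart give
# 2 * RADIUS * asin(sin(radians(1) / 2)) ~= 111_319 meters, i.e. roughly 111.3 km.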
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ):
'''simple docstring'''
snake_case: List[str] = parent
snake_case: int = batch_size
snake_case: Any = seq_length
snake_case: Optional[int] = is_training
snake_case: List[Any] = use_input_mask
snake_case: Union[str, Any] = use_token_type_ids
snake_case: str = use_labels
snake_case: Optional[int] = vocab_size
snake_case: Tuple = hidden_size
snake_case: Dict = num_hidden_layers
snake_case: Optional[Any] = num_attention_heads
snake_case: Optional[Any] = intermediate_size
snake_case: Optional[int] = hidden_act
snake_case: int = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: Optional[int] = max_position_embeddings
snake_case: List[str] = type_vocab_size
snake_case: Union[str, Any] = type_sequence_label_size
snake_case: Dict = initializer_range
snake_case: Dict = num_labels
snake_case: Any = num_choices
snake_case: str = scope
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: List[str] = None
if self.use_input_mask:
snake_case: Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case: Optional[int] = None
if self.use_token_type_ids:
snake_case: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case: int = None
snake_case: Dict = None
snake_case: List[Any] = None
if self.use_labels:
snake_case: Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case: Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = BioGptModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: List[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: List[str] = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = BioGptModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
# create attention mask
snake_case: Any = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.seq_length // 2
snake_case: List[str] = 0
# first forward pass
snake_case: Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
snake_case: Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case: Optional[int] = ids_tensor((1,) , SCREAMING_SNAKE_CASE__ ).item() + 1
snake_case: Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case: str = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case: Any = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case: Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )] , dim=1 , )
# get two different outputs
snake_case: Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )['last_hidden_state']
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )['last_hidden_state']
# select random slice
snake_case: Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case: Any = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case: Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = BioGptModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).eval()
snake_case: Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
# first forward pass
snake_case: List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids with them
snake_case: Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case: int = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
snake_case: Any = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case: Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case: Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )['last_hidden_state']
snake_case: Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )[
'last_hidden_state'
]
# select random slice
snake_case: str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case: Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case: List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
snake_case: str = BioGptForCausalLM(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Any = BioGptModel(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[str] = self.num_labels
snake_case: Any = BioGptForTokenClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.prepare_config_and_inputs()
        snake_case: List[str] = config_and_inputs
snake_case: int = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__UpperCamelCase = (BioGptForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = BioGptModelTester(self )
snake_case: Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case: Optional[Any] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE__ , gradient_checkpointing=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case: Optional[int] = 'left'
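        # decoder-only models are padded on the left so that generation continues from the
        # last real token rather than from padding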
        # define the PAD token to be the EOS token
snake_case: Optional[Any] = tokenizer.eos_token
snake_case: Optional[Any] = model.config.eos_token_id
# use different length sentences to test batching
snake_case: Tuple = [
'Hello, my dog is a little',
'Today, I',
]
snake_case: int = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE__ )
snake_case: str = inputs['input_ids'].to(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = model.generate(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=inputs['attention_mask'].to(SCREAMING_SNAKE_CASE__ ) , )
snake_case: Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ )
snake_case: int = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
snake_case: Tuple = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = model.generate(input_ids=SCREAMING_SNAKE_CASE__ , max_length=model.config.max_length - num_paddings )
snake_case: Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: str = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: str = 3
snake_case: List[str] = input_dict['input_ids']
snake_case: Any = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case: Optional[Any] = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: int = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: List[Any] = 3
snake_case: int = 'multi_label_classification'
snake_case: Dict = input_dict['input_ids']
snake_case: List[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE__ )
snake_case: int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case: int = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: str = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
snake_case: Any = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
snake_case: List[Any] = model(SCREAMING_SNAKE_CASE__ )[0]
snake_case: List[str] = 4_23_84
snake_case: Any = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case: Optional[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(0 )
snake_case: Optional[int] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
snake_case: int = model.generate(
**SCREAMING_SNAKE_CASE__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE__ , )
snake_case: str = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=0.6 , SCREAMING_SNAKE_CASE__=None , ):
'''simple docstring'''
snake_case: Optional[int] = parent
snake_case: List[Any] = batch_size
snake_case: int = image_size
snake_case: Optional[Any] = patch_size
snake_case: str = num_channels
snake_case: Optional[int] = is_training
snake_case: Dict = use_labels
snake_case: List[Any] = hidden_size
snake_case: Optional[Any] = num_hidden_layers
snake_case: Tuple = num_attention_heads
snake_case: List[Any] = intermediate_size
snake_case: List[str] = hidden_act
snake_case: str = hidden_dropout_prob
snake_case: Union[str, Any] = attention_probs_dropout_prob
snake_case: Dict = type_sequence_label_size
snake_case: str = initializer_range
snake_case: Union[str, Any] = mask_ratio
snake_case: Dict = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case: List[Any] = (image_size // patch_size) ** 2
snake_case: Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
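        # with the defaults above this gives num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91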
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: List[Any] = None
if self.use_labels:
snake_case: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case: List[Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = TFViTMAEModel(config=SCREAMING_SNAKE_CASE__ )
snake_case: Any = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# expected sequence length = num_patches
snake_case: Any = (self.image_size // self.patch_size) ** 2
snake_case: Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case: Optional[int] = 1
snake_case: Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case: int = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.prepare_config_and_inputs()
(snake_case): int = config_and_inputs
snake_case: str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__UpperCamelCase = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = TFViTMAEModelTester(self )
snake_case: Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case: Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Layer ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: Union[str, Any] = [*signature.parameters.keys()]
snake_case: List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
np.random.seed(2 )
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: List[str] = int((config.image_size // config.patch_size) ** 2 )
snake_case: List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case: Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: Optional[Any] = model(**SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = outputs_dict[0].numpy()
snake_case: Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def _UpperCamelCase ( self ):
'''simple docstring'''
np.random.seed(2 )
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case: int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE__ ):
snake_case: List[str] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE__ ):
snake_case: Optional[Any] = v.numpy()
else:
snake_case: Tuple = np.array(SCREAMING_SNAKE_CASE__ )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = prepare_numpy_arrays(SCREAMING_SNAKE_CASE__ )
snake_case: int = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = model(**SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
self.assert_outputs_same(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
np.random.seed(2 )
snake_case: Optional[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case: Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case: Tuple = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
np.random.seed(2 )
snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: List[str] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE__ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),)
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE__ , '_keras_serializable' , SCREAMING_SNAKE_CASE__ )
}
snake_case: List[str] = int((config.image_size // config.patch_size) ** 2 )
snake_case: str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case: Tuple = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case: Optional[int] = main_layer_class(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case: List[str] = tf.keras.Model(SCREAMING_SNAKE_CASE__ , outputs=main_layer(SCREAMING_SNAKE_CASE__ ) )
snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , 'keras_model.h5' )
model.save(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.Model )
snake_case: Any = model(SCREAMING_SNAKE_CASE__ )
self.assert_outputs_same(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
np.random.seed(2 )
snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case: Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case: Dict = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
if model_class.__name__ == "TFViTMAEModel":
snake_case: int = outputs.last_hidden_state.numpy()
snake_case: Any = 0
else:
snake_case: Any = outputs.logits.numpy()
snake_case: Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ , saved_model=SCREAMING_SNAKE_CASE__ )
snake_case: str = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
if model_class.__name__ == "TFViTMAEModel":
snake_case: List[str] = after_outputs['last_hidden_state'].numpy()
snake_case: Optional[Any] = 0
else:
snake_case: Union[str, Any] = after_outputs['logits'].numpy()
snake_case: List[Any] = 0
snake_case: Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1E-5 )
def _UpperCamelCase ( self ):
'''simple docstring'''
np.random.seed(2 )
snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: str = int((config.image_size // config.patch_size) ** 2 )
snake_case: Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case: Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE__ )
snake_case: str = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case: str = model_class.from_config(model.config )
snake_case: Union[str, Any] = new_model(SCREAMING_SNAKE_CASE__ ) # Build model
new_model.set_weights(model.get_weights() )
snake_case: Optional[Any] = new_model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
self.assert_outputs_same(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
np.random.seed(2 )
snake_case: int = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
snake_case: Dict = self.default_image_processor
snake_case: Optional[Any] = prepare_img()
snake_case: int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case: Optional[int] = ViTMAEConfig()
snake_case: Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case: int = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case: Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
# verify the logits
snake_case: List[Any] = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
snake_case: int = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Tuple = model.config
snake_case: str = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
snake_case: Optional[Any] = MBartConfig(
is_decoder=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__A , add_final_layer_norm=__A , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "encoder.model" in name:
snake_case: Optional[Any] = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
snake_case: str = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
snake_case: Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
snake_case: Tuple = 'encoder.' + name
if "attn.proj" in name:
snake_case: Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
snake_case: Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case: Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
snake_case: Dict = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
snake_case: int = 'encoder.layernorm.bias'
return name
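# A worked example of the renaming above (illustration only): the checkpoint key
# "encoder.model.layers.0.blocks.0.attn.proj.weight" becomes
# "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"
# ("encoder.model" -> "encoder", an extra "encoder." prefix for layer weights,
# and "attn.proj" -> "attention.output.dense").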
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case: List[Any] = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case: Union[str, Any] = key.split('.' )
snake_case: Optional[Any] = int(key_split[3] )
snake_case: Any = int(key_split[5] )
snake_case: Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case: Union[str, Any] = val[:dim, :]
snake_case: Any = val[dim : dim * 2, :]
snake_case: List[str] = val[-dim:, :]
else:
snake_case: str = val[:dim]
snake_case: Union[str, Any] = val[dim : dim * 2]
snake_case: List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
snake_case: Optional[int] = val
return orig_state_dict
def lowerCAmelCase_ ( __A : List[Any] , __A : Any=None , __A : List[str]=False ):
'''simple docstring'''
snake_case: str = DonutModel.from_pretrained(__A ).eval()
# load HuggingFace model
snake_case , snake_case: Optional[Any] = get_configs(__A )
snake_case: Optional[int] = DonutSwinModel(__A )
snake_case: Tuple = MBartForCausalLM(__A )
snake_case: Optional[Any] = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
snake_case: Optional[int] = original_model.state_dict()
snake_case: Optional[int] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# verify results on scanned document
snake_case: Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
snake_case: str = dataset['test'][0]['image'].convert('RGB' )
snake_case: Optional[int] = XLMRobertaTokenizerFast.from_pretrained(__A , from_slow=__A )
snake_case: Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
snake_case: Dict = DonutProcessor(__A , __A )
snake_case: Optional[Any] = processor(__A , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
snake_case: int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
snake_case: Optional[Any] = 'When is the coffee break?'
snake_case: Optional[int] = task_prompt.replace('{user_input}' , __A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
snake_case: Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
snake_case: str = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
snake_case: str = 's_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
snake_case: int = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
snake_case: Optional[Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
snake_case: Optional[int] = original_model.decoder.tokenizer(__A , add_special_tokens=__A , return_tensors='pt' )[
'input_ids'
]
snake_case: Any = original_model.encoder.model.patch_embed(__A )
snake_case , snake_case: Dict = model.encoder.embeddings(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
# verify encoder hidden states
snake_case: Tuple = original_model.encoder(__A )
snake_case: List[str] = model.encoder(__A ).last_hidden_state
assert torch.allclose(__A , __A , atol=1E-2 )
# verify decoder hidden states
snake_case: List[Any] = original_model(__A , __A , __A ).logits
snake_case: List[Any] = model(__A , decoder_input_ids=__A ).logits
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__UpperCAmelCase = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
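# A minimal command-line sketch for the converter above (the script filename and output directory are
# illustrative assumptions; the flags are exactly the ones registered with argparse above):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./converted_donut \
#       --push_to_hub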
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__()
snake_case: str = value_function
snake_case: Union[str, Any] = unet
snake_case: int = scheduler
snake_case: Any = env
snake_case: List[str] = env.get_dataset()
snake_case: str = {}
for key in self.data.keys():
try:
snake_case: List[str] = self.data[key].mean()
except: # noqa: E722
pass
snake_case: Any = {}
for key in self.data.keys():
try:
snake_case: str = self.data[key].std()
except: # noqa: E722
pass
snake_case: int = env.observation_space.shape[0]
snake_case: Optional[int] = env.action_space.shape[0]
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE__ ) is dict:
return {k: self.to_torch(SCREAMING_SNAKE_CASE__ ) for k, v in x_in.items()}
elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ):
return x_in.to(self.unet.device )
return torch.tensor(SCREAMING_SNAKE_CASE__ , device=self.unet.device )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for key, val in cond.items():
snake_case: Optional[int] = val.clone()
return x_in
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = x.shape[0]
snake_case: Optional[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
snake_case: int = torch.full((batch_size,) , SCREAMING_SNAKE_CASE__ , device=self.unet.device , dtype=torch.long )
for _ in range(SCREAMING_SNAKE_CASE__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
snake_case: int = self.value_function(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE__ ).sample
snake_case: Any = torch.autograd.grad([y.sum()] , [x] )[0]
snake_case: List[Any] = self.scheduler._get_variance(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = torch.exp(0.5 * posterior_variance )
snake_case: Any = model_std * grad
snake_case: Optional[int] = 0
snake_case: Dict = x.detach()
snake_case: List[str] = x + scale * grad
snake_case: Optional[Any] = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim )
snake_case: List[str] = self.unet(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
snake_case: Tuple = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , predict_epsilon=SCREAMING_SNAKE_CASE__ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
snake_case: int = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim )
snake_case: List[str] = self.to_torch(SCREAMING_SNAKE_CASE__ )
return x, y
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 ):
'''simple docstring'''
snake_case: Any = self.normalize(SCREAMING_SNAKE_CASE__ , 'observations' )
snake_case: Union[str, Any] = obs[None].repeat(SCREAMING_SNAKE_CASE__ , axis=0 )
snake_case: Optional[int] = {0: self.to_torch(SCREAMING_SNAKE_CASE__ )}
snake_case: Union[str, Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
snake_case: Optional[Any] = randn_tensor(SCREAMING_SNAKE_CASE__ , device=self.unet.device )
snake_case: Optional[Any] = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim )
snake_case: Dict = self.to_torch(SCREAMING_SNAKE_CASE__ )
# run the diffusion process
snake_case: List[str] = self.run_diffusion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# sort output trajectories by value
snake_case: Any = y.argsort(0 , descending=SCREAMING_SNAKE_CASE__ ).squeeze()
snake_case: List[str] = x[sorted_idx]
snake_case: Dict = sorted_values[:, :, : self.action_dim]
snake_case: Tuple = actions.detach().cpu().numpy()
snake_case: Tuple = self.de_normalize(SCREAMING_SNAKE_CASE__ , key='actions' )
# select the action with the highest value
if y is not None:
snake_case: Optional[Any] = 0
else:
# if we didn't run value guiding, select a random action
snake_case: List[str] = np.random.randint(0 , SCREAMING_SNAKE_CASE__ )
snake_case: str = denorm_actions[selected_index, 0]
        return denorm_actions
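# A rough usage sketch for the value-guided planning pipeline above (hedged: the alias name, the gym-style
# `env`, and the pretrained value_function / unet / scheduler objects are assumptions, but the constructor
# argument order matches __init__ above):
#
#   pipeline = ValueGuidedPipeline(value_function, unet, scheduler, env)   # i.e. the class defined above
#   obs = env.reset()
#   action = pipeline(obs)                      # positional defaults above: batch size 64, horizon 32
#   obs, reward, done, info = env.step(action)  # gym-style step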
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
snake_case: Union[str, Any] = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = np.random.randn(3 , 4 )
snake_case: Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case: Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Dict = np.random.randn(3 , 4 , 5 )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Optional[int] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
snake_case: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: List[str] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
snake_case: List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: int = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = np.random.randn(1 , 3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case: List[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Tuple = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Any = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = np.random.randn(3 , 4 )
snake_case: int = jnp.array(SCREAMING_SNAKE_CASE__ )
        self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) )
'''simple docstring'''
__UpperCAmelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "PoolFormerConfig"
# Base docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = [1, 512, 7, 7]
# Image classification docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = "tabby, tabby cat"
__UpperCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( __A : Tuple , __A : float = 0.0 , __A : bool = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
snake_case: Union[str, Any] = 1 - drop_prob
snake_case: List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case: List[Any] = keep_prob + torch.rand(__A , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case: Any = input.div(__A ) * random_tensor
return output
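# Illustrative behaviour of the stochastic-depth helper above (example values, not from the source;
# `drop_path` is the name used at its call site in the DropPath module below). In the standard formulation
# this helper follows, drop_prob=0.25 and training=True give keep_prob=0.75, so each sample in the batch is
# zeroed with probability 0.25 and the survivors are rescaled by 1 / 0.75, e.g.:
#
#   x = torch.ones(4, 3, 8, 8)
#   out = drop_path(x, 0.25, True)   # on average 3 of 4 samples survive, with values of about 1.333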
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = drop_prob
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training )
def _UpperCamelCase ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: List[str] = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride)
snake_case: Union[str, Any] = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding)
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.projection(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.norm(SCREAMING_SNAKE_CASE__ )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: str = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = ACTaFN[config.hidden_act]
else:
snake_case: int = config.hidden_act
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.act_fn(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.drop(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.drop(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = PoolFormerPooling(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
# Useful for training neural nets
snake_case: Union[str, Any] = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity()
snake_case: Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
snake_case: Any = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.use_layer_scale:
snake_case: str = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: str = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = ()
snake_case: Dict = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: Any = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = (output,) + outputs
return outputs
else:
snake_case: Optional[Any] = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) )
# First residual connection
snake_case: Union[str, Any] = pooling_output + hidden_states
snake_case: List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: Dict = hidden_states + layer_output
snake_case: Optional[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = config
# stochastic depth decay rule
snake_case: List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: List[Any] = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
# Transformer blocks
snake_case: str = []
snake_case: int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
snake_case: str = () if output_hidden_states else None
snake_case: Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: Dict = layers
# Get patch embeddings from hidden_states
snake_case: int = embedding_layer(SCREAMING_SNAKE_CASE__ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = blk(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = layer_outputs[0]
if output_hidden_states:
snake_case: List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = "poolformer"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = config
snake_case: Tuple = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: List[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Any = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.dense(SCREAMING_SNAKE_CASE__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = config.num_labels
snake_case: str = PoolFormerModel(SCREAMING_SNAKE_CASE__ )
# Final norm
snake_case: int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case: Optional[Any] = self.poolformer(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: Any = outputs[0]
snake_case: str = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) )
snake_case: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict = 'single_label_classification'
else:
snake_case: List[str] = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Union[str, Any] = MSELoss()
if self.num_labels == 1:
snake_case: List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: int = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
snake_case: Union[str, Any] = CrossEntropyLoss()
snake_case: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int = BCEWithLogitsLoss()
snake_case: Optional[int] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
snake_case: str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
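# A short inference sketch for the classification head above (hedged: the checkpoint and processor come
# from the docstring constants in this file; the concrete transformers class names differ from the names
# used here and are an assumption):
#
#   from transformers import PoolFormerImageProcessor, PoolFormerForImageClassification
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()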
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
assert (
isinstance(__A , __A ) and number_of_steps > 0
), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
    snake_case , snake_case: Tuple = 1, 1
    for _ in range(number_of_steps - 1 ):
        snake_case , snake_case: List[str] = current + previous, current
return current
if __name__ == "__main__":
import doctest
    doctest.testmod()
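# Worked examples for the stair-climbing count above (hedged: the loop is the Fibonacci-style recurrence
# for taking 1 or 2 steps at a time): 1 step -> 1 way, 2 steps -> 2 ways (1+1, 2),
# 3 steps -> 3 ways (1+1+1, 1+2, 2+1), 5 steps -> 8 ways.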
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase_ ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case: Any = cst_fwd.get(__A , np.inf )
snake_case: int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case: Union[str, Any] = new_cost_f
snake_case: Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case: List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[Any] = -1
snake_case: Any = set()
snake_case: str = set()
snake_case: int = {source: 0}
snake_case: Dict = {destination: 0}
snake_case: int = {source: None}
snake_case: Union[str, Any] = {destination: None}
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case , snake_case: List[str] = queue_forward.get()
visited_forward.add(__A )
snake_case , snake_case: int = queue_backward.get()
visited_backward.add(__A )
snake_case: str = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
snake_case: Optional[Any] = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case: Any = shortest_distance
return shortest_path_distance
__UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
__UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
    doctest.testmod()
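# Worked example for the graphs above (hand-checked, assuming the second function performs bidirectional
# Dijkstra from a source in the forward graph to a destination in the backward graph): the shortest
# E -> F distance is 3, via E -(2)-> G -(1)-> F, which beats E -> B -> C -> D -> F with total cost 4.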
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ShapEPipeline
__UpperCamelCase = ["prompt"]
__UpperCamelCase = ["prompt"]
__UpperCamelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCamelCase = False
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return 32
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return 32
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return 8
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
snake_case: Optional[Any] = PriorTransformer(**SCREAMING_SNAKE_CASE__ )
return model
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
snake_case: Union[str, Any] = ShapERenderer(**SCREAMING_SNAKE_CASE__ )
return model
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.dummy_prior
snake_case: Dict = self.dummy_text_encoder
snake_case: int = self.dummy_tokenizer
snake_case: List[str] = self.dummy_renderer
snake_case: Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , )
snake_case: Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
snake_case: Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
snake_case: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = 'cpu'
snake_case: List[str] = self.get_dummy_components()
snake_case: Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
snake_case: int = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = output.images[0]
snake_case: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
snake_case: Union[str, Any] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = torch_device == 'cpu'
snake_case: Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_dummy_components()
snake_case: Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = 1
snake_case: List[Any] = 2
snake_case: Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
for key in inputs.keys():
if key in self.batch_params:
snake_case: Any = batch_size * [inputs[key]]
snake_case: Dict = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
snake_case: Tuple = ShapEPipeline.from_pretrained('openai/shap-e' )
snake_case: List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
snake_case: int = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
        return (out_vocab_file,)
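# Token-id alignment sketch for the table in __init__ above (the ids follow from fairseq_tokens_to_ids and
# fairseq_offset = 1; `_convert_token_to_id` is the usual name of the private helper defined in this class
# and is an assumption on top of the names used here):
#
#   <s> -> 0, <pad> -> 1, </s> -> 2, <unk> -> 3
#   any other SentencePiece piece with spm id k (k > 0) -> k + 1
#   the <madeupword0> ... <madeupword6> tokens are appended after the SentencePiece vocabulary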
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
raise NotImplementedError()
def _UpperCamelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = tokenizer
snake_case: Dict = skip_prompt
snake_case: Any = decode_kwargs
# variables used in the streaming process
snake_case: Tuple = []
snake_case: Tuple = 0
snake_case: List[str] = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('TextStreamer only supports batch size 1' )
elif len(value.shape ) > 1:
snake_case: Any = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
snake_case: str = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
snake_case: int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('\n' ):
snake_case: Union[str, Any] = text[self.print_len :]
snake_case: List[str] = []
snake_case: List[str] = 0
# If the last token is a CJK character, we print the characters.
elif len(SCREAMING_SNAKE_CASE__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
snake_case: List[Any] = text[self.print_len :]
self.print_len += len(SCREAMING_SNAKE_CASE__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
snake_case: List[str] = text[self.print_len : text.rfind(' ' ) + 1]
self.print_len += len(SCREAMING_SNAKE_CASE__ )
self.on_finalized_text(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
if len(self.token_cache ) > 0:
snake_case: Any = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
snake_case: Optional[Any] = text[self.print_len :]
snake_case: str = []
snake_case: Tuple = 0
else:
snake_case: List[str] = ''
snake_case: Any = True
self.on_finalized_text(SCREAMING_SNAKE_CASE__ , stream_end=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
print(SCREAMING_SNAKE_CASE__ , flush=SCREAMING_SNAKE_CASE__ , end='' if not stream_end else None )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = Queue()
snake_case: List[str] = None
snake_case: Tuple = timeout
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
self.text_queue.put(SCREAMING_SNAKE_CASE__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
'''simple docstring'''
return self
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
            return value
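# A typical streaming-generation sketch (hedged: this mirrors how an iterator streamer like the class above
# is usually combined with transformers' generate(); the model/tokenizer/input_ids objects are assumed to
# exist and are not defined in this file):
#
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)           # i.e. the iterator class above
#   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:      # __next__ blocks until the queue yields text or the stop signal
#       print(new_text, end="")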
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
return getitem, k
def lowerCAmelCase_ ( __A : Any , __A : Optional[int] ):
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
return delitem, k
def lowerCAmelCase_ ( __A : str , __A : int , *__A : Tuple ):
'''simple docstring'''
try:
return fun(__A , *__A ), None
except Exception as e:
return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: List[Any] = HashMap(initial_block_size=4 )
snake_case: List[Any] = {}
for _, (fun, *args) in enumerate(__A ):
snake_case , snake_case: Optional[int] = _run_operation(__A , __A , *__A )
snake_case , snake_case: str = _run_operation(__A , __A , *__A )
assert my_res == py_res
assert str(__A ) == str(__A )
assert set(__A ) == set(__A )
assert len(__A ) == len(__A )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
'''simple docstring'''
def is_public(__A : str ) -> bool:
return not name.startswith('_' )
snake_case: Dict = {name for name in dir({} ) if is_public(__A )}
snake_case: List[str] = {name for name in dir(HashMap() ) if is_public(__A )}
    assert dict_public_names > hash_public_names
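    # Added note: since both operands are sets, the strict ">" comparison asserts that
    # every public name exposed by HashMap also exists on the built-in dict (and dict
    # has at least one extra), i.e. HashMap's public surface is a proper subset of dict's.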
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __A : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ):
'''simple docstring'''
for attribute in key.split('.' ):
snake_case: List[str] = getattr(__A , __A )
if weight_type is not None:
snake_case: Optional[int] = getattr(__A , __A ).shape
else:
snake_case: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case: Optional[int] = value
elif weight_type == "weight_g":
snake_case: List[str] = value
elif weight_type == "weight_v":
snake_case: Dict = value
elif weight_type == "bias":
snake_case: Optional[Any] = value
else:
snake_case: int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A : List[Any] , __A : List[str] ):
'''simple docstring'''
snake_case: List[Any] = []
snake_case: List[Any] = fairseq_model.state_dict()
snake_case: Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case: Dict = None
for name, value in fairseq_dict.items():
snake_case: Tuple = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
snake_case: List[Any] = True
elif name.split('.' )[0] == "proj":
snake_case: List[Any] = fairseq_model.proj
snake_case: int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case: int = True
if "*" in mapped_key:
snake_case: List[str] = name.split(__A )[0].split('.' )[-2]
snake_case: Dict = mapped_key.replace('*' , __A )
if "weight_g" in name:
snake_case: Tuple = 'weight_g'
elif "weight_v" in name:
snake_case: int = 'weight_v'
elif "bias" in name:
snake_case: Tuple = 'bias'
elif "weight" in name:
snake_case: List[Any] = 'weight'
else:
snake_case: Any = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( __A : List[str] , __A : List[Any] , __A : int , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: int = full_name.split('conv_layers.' )[-1]
snake_case: Tuple = name.split('.' )
snake_case: Any = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case: Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case: int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case: Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case: str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case: List[Any] = emb.weight.shape
snake_case: Optional[int] = nn.Linear(__A , __A , bias=__A )
snake_case: Any = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
with open(__A , 'r' , encoding='utf-8' ) as f:
snake_case: List[Any] = f.readlines()
snake_case: Any = [line.split(' ' )[0] for line in lines]
snake_case: int = len(__A )
snake_case: Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Any , __A : List[Any] , __A : int , __A : str , ):
'''simple docstring'''
snake_case: Union[str, Any] = WavaVecaConfig.from_pretrained(__A )
snake_case: str = SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
snake_case: List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case: List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case: Optional[Any] = WavaVecaModel(__A )
snake_case: Any = recursively_load_weights_wavaveca(model.encoder , __A )
snake_case: Union[str, Any] = SpeechaTextaForCausalLM(__A )
snake_case: Optional[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case: str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case: int = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
snake_case: List[Any] = False
# add projection layer
snake_case: Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case: Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case: List[Any] = create_vocab_dict(__A )
with open(os.path.join(__A , 'vocab.json' ) , 'w' ) as fp:
json.dump(__A , __A )
snake_case: Union[str, Any] = SpeechaTextaTokenizer(os.path.join(__A , 'vocab.json' ) )
tokenizer.save_pretrained(__A )
snake_case: Tuple = hf_wavavec.config.to_dict()
snake_case: int = tokenizer.pad_token_id
snake_case: Dict = tokenizer.bos_token_id
snake_case: Optional[int] = tokenizer.eos_token_id
snake_case: Dict = 'speech_to_text_2'
snake_case: Optional[Any] = 'wav2vec2'
snake_case: Tuple = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
    )
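# Added example invocation (the script file name and all paths are hypothetical):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text
#
# The remaining arguments fall back to the defaults declared in the argparse block above.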
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __A : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ):
'''simple docstring'''
for attribute in key.split('.' ):
snake_case: List[str] = getattr(__A , __A )
if weight_type is not None:
snake_case: Optional[int] = getattr(__A , __A ).shape
else:
snake_case: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case: Optional[int] = value
elif weight_type == "weight_g":
snake_case: List[str] = value
elif weight_type == "weight_v":
snake_case: Dict = value
elif weight_type == "bias":
snake_case: Optional[Any] = value
else:
snake_case: int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A : List[Any] , __A : List[str] ):
'''simple docstring'''
snake_case: List[Any] = []
snake_case: List[Any] = fairseq_model.state_dict()
snake_case: Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case: Dict = None
for name, value in fairseq_dict.items():
snake_case: Tuple = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
snake_case: List[Any] = True
elif name.split('.' )[0] == "proj":
snake_case: List[Any] = fairseq_model.proj
snake_case: int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case: int = True
if "*" in mapped_key:
snake_case: List[str] = name.split(__A )[0].split('.' )[-2]
snake_case: Dict = mapped_key.replace('*' , __A )
if "weight_g" in name:
snake_case: Tuple = 'weight_g'
elif "weight_v" in name:
snake_case: int = 'weight_v'
elif "bias" in name:
snake_case: Tuple = 'bias'
elif "weight" in name:
snake_case: List[Any] = 'weight'
else:
snake_case: Any = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( __A : List[str] , __A : List[Any] , __A : int , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: int = full_name.split('conv_layers.' )[-1]
snake_case: Tuple = name.split('.' )
snake_case: Any = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case: Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case: int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case: Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case: str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case , snake_case: List[Any] = emb.weight.shape
snake_case: Optional[int] = nn.Linear(__A , __A , bias=__A )
snake_case: Any = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
with open(__A , 'r' , encoding='utf-8' ) as f:
snake_case: List[Any] = f.readlines()
snake_case: Any = [line.split(' ' )[0] for line in lines]
snake_case: int = len(__A )
snake_case: Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Any , __A : List[Any] , __A : int , __A : str , ):
'''simple docstring'''
snake_case: Union[str, Any] = WavaVecaConfig.from_pretrained(__A )
snake_case: str = SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
snake_case: List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
snake_case , snake_case , snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case: List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case: Optional[Any] = WavaVecaModel(__A )
snake_case: Any = recursively_load_weights_wavaveca(model.encoder , __A )
snake_case: Union[str, Any] = SpeechaTextaForCausalLM(__A )
snake_case , snake_case: Optional[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case: str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case: int = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
snake_case: List[Any] = False
# add projection layer
snake_case: Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case: Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case: List[Any] = create_vocab_dict(__A )
with open(os.path.join(__A , 'vocab.json' ) , 'w' ) as fp:
json.dump(__A , __A )
snake_case: Union[str, Any] = SpeechaTextaTokenizer(os.path.join(__A , 'vocab.json' ) )
tokenizer.save_pretrained(__A )
snake_case: Tuple = hf_wavavec.config.to_dict()
snake_case: int = tokenizer.pad_token_id
snake_case: Dict = tokenizer.bos_token_id
snake_case: Optional[int] = tokenizer.eos_token_id
snake_case: Dict = 'speech_to_text_2'
snake_case: Optional[Any] = 'wav2vec2'
snake_case: Tuple = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
    )
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Tuple = model.config
snake_case: str = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
snake_case: Optional[Any] = MBartConfig(
is_decoder=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__A , add_final_layer_norm=__A , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "encoder.model" in name:
snake_case: Optional[Any] = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
snake_case: str = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
snake_case: Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
snake_case: Tuple = 'encoder.' + name
if "attn.proj" in name:
snake_case: Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
snake_case: Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case: Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
snake_case: Dict = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
snake_case: int = 'encoder.layernorm.bias'
return name
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case: List[Any] = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case: Union[str, Any] = key.split('.' )
snake_case: Optional[Any] = int(key_split[3] )
snake_case: Any = int(key_split[5] )
snake_case: Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case: Union[str, Any] = val[:dim, :]
snake_case: Any = val[dim : dim * 2, :]
snake_case: List[str] = val[-dim:, :]
else:
snake_case: str = val[:dim]
snake_case: Union[str, Any] = val[dim : dim * 2]
snake_case: List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
snake_case: Optional[int] = val
return orig_state_dict
def lowerCAmelCase_ ( __A : List[Any] , __A : Any=None , __A : List[str]=False ):
'''simple docstring'''
snake_case: str = DonutModel.from_pretrained(__A ).eval()
# load HuggingFace model
snake_case: Optional[Any] = get_configs(__A )
snake_case: Optional[int] = DonutSwinModel(__A )
snake_case: Tuple = MBartForCausalLM(__A )
snake_case: Optional[Any] = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
snake_case: Optional[int] = original_model.state_dict()
snake_case: Optional[int] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# verify results on scanned document
snake_case: Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
snake_case: str = dataset['test'][0]['image'].convert('RGB' )
snake_case: Optional[int] = XLMRobertaTokenizerFast.from_pretrained(__A , from_slow=__A )
snake_case: Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
snake_case: Dict = DonutProcessor(__A , __A )
snake_case: Optional[Any] = processor(__A , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
snake_case: int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
snake_case: Optional[Any] = 'When is the coffee break?'
snake_case: Optional[int] = task_prompt.replace('{user_input}' , __A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
snake_case: Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
snake_case: str = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
snake_case: str = 's_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
snake_case: int = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
snake_case: Optional[Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
snake_case: Optional[int] = original_model.decoder.tokenizer(__A , add_special_tokens=__A , return_tensors='pt' )[
'input_ids'
]
snake_case: Any = original_model.encoder.model.patch_embed(__A )
snake_case: Dict = model.encoder.embeddings(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
# verify encoder hidden states
snake_case: Tuple = original_model.encoder(__A )
snake_case: List[str] = model.encoder(__A ).last_hidden_state
assert torch.allclose(__A , __A , atol=1E-2 )
# verify decoder hidden states
snake_case: List[Any] = original_model(__A , __A , __A ).logits
snake_case: List[Any] = model(__A , decoder_input_ids=__A ).logits
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__UpperCAmelCase = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
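# Added example invocation (the script file name and output path are hypothetical):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa \
#       --push_to_hub
#
# Saving and pushing only happen after the patch-embedding, encoder hidden-state and
# decoder logit checks above pass against the original model.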
'''simple docstring'''
def lowerCAmelCase_ ( __A : int = 1_00 ):
'''simple docstring'''
snake_case: List[str] = n * (n + 1) * (2 * n + 1) / 6
snake_case: List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
    print(F'{solution() = }')
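# Added note: the two closed-form identities used above are
#   sum of squares    = n(n+1)(2n+1)/6
#   square of the sum = (n(n+1)/2)**2
# and for the default n = 100 their difference evaluates to 25164150 (Project Euler problem 6).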
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE__=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=None , ):
'''simple docstring'''
snake_case: Dict = parent
snake_case: Dict = batch_size
snake_case: Optional[Any] = image_size
snake_case: Tuple = num_channels
snake_case: Optional[Any] = embeddings_size
snake_case: int = hidden_sizes
snake_case: List[str] = depths
snake_case: Optional[int] = is_training
snake_case: Union[str, Any] = use_labels
snake_case: int = hidden_act
snake_case: Tuple = num_labels
snake_case: List[str] = scope
snake_case: Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: Tuple = self.get_config()
return config, pixel_values
def _UpperCamelCase ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = FlaxRegNetModel(config=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Tuple = self.num_labels
snake_case: List[Any] = FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE__ )
snake_case: str = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.prepare_config_and_inputs()
snake_case: Union[str, Any] = config_and_inputs
snake_case: Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = FlaxRegNetModelTester(self )
snake_case: Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: str = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: Any = [*signature.parameters.keys()]
snake_case: List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case: Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case: Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: int = model_class(SCREAMING_SNAKE_CASE__ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return model(pixel_values=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
with self.subTest('JIT Enabled' ):
snake_case: List[Any] = model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case: Any = model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
snake_case: Union[str, Any] = self.default_image_processor
snake_case: List[str] = prepare_img()
snake_case: Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Any = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
snake_case: Any = (1, 10_00)
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
snake_case: int = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__UpperCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__UpperCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__UpperCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCAmelCase_ ( __A : Dict , __A : List[Any] ):
'''simple docstring'''
for tf_name, hf_name in patterns:
snake_case: List[Any] = k.replace(__A , __A )
return k
def lowerCAmelCase_ ( __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[int] = BigBirdPegasusConfig(**__A )
snake_case: List[Any] = BigBirdPegasusForConditionalGeneration(__A )
snake_case: Any = torch_model.state_dict()
snake_case: Any = {}
# separating decoder weights
snake_case: Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
snake_case: Any = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
snake_case: List[str] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Any = DECODER_PATTERNS
snake_case: int = rename_state_dict_key(__A , __A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
snake_case: Optional[Any] = v.T
snake_case: Any = torch.from_numpy(__A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
snake_case: List[Any] = [k.endswith(__A ) for ending in KEYS_TO_IGNORE]
if any(__A ):
continue
snake_case: Union[str, Any] = REMAINING_PATTERNS
snake_case: str = rename_state_dict_key(__A , __A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
snake_case: int = v.T
snake_case: Any = torch.from_numpy(__A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case: str = mapping['model.embed_positions.weight']
snake_case: Any = mapping.pop('model.embed_positions.weight' )
snake_case , snake_case: Union[str, Any] = torch_model.load_state_dict(__A , strict=__A )
snake_case: Optional[int] = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
snake_case: Tuple = tf.train.list_variables(__A )
snake_case: str = {}
snake_case: List[str] = ['global_step']
for name, shape in tqdm(__A , desc='converting tf checkpoint to dict' ):
snake_case: str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case: Any = tf.train.load_variable(__A , __A )
snake_case: Optional[int] = array
return tf_weights
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict ):
'''simple docstring'''
snake_case: int = get_tf_weights_as_numpy(__A )
snake_case: int = convert_bigbird_pegasus(__A , __A )
torch_model.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__UpperCAmelCase = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def lowerCAmelCase_ ( __A : Optional[int] , __A : int=None ):
'''simple docstring'''
    require_version(deps[pkg] , __A )
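# Added note: the helper above simply re-checks at runtime that the installed version of
# `pkg` satisfies the pin recorded in `deps[pkg]`, raising if it does not; the optional
# second argument is a hint string appended to the error message, e.g. (illustrative call)
#
#   lowerCAmelCase_("numpy", "numpy is required for array ops")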
'''simple docstring'''
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
snake_case: str = [0] * len(__A )
snake_case: Tuple = []
snake_case: Tuple = [1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
snake_case: int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case: Any = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
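# Added note: for the adjacency list above this should print 5 — the number of vertices on
# a longest path through the DAG, e.g. 0 -> 3 -> 5 -> 6 -> 7 (distances are seeded with 1
# per vertex, so each edge on the path adds 1).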
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__UpperCAmelCase = object()
# For specifying empty leaf dict `{}`
__UpperCAmelCase = object()
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Dict = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(__A ) - len(__A ) + 1 ):
snake_case: List[Any] = [x.match(__A ) for x, y in zip(__A , ks[i:] )]
if matches and all(__A ):
return True
return False
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
def replace(__A : Optional[int] , __A : Optional[int] ):
for rule, replacement in rules:
if _match(__A , __A ):
return replacement
return val
return replace
def lowerCAmelCase_ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __A )),
(("transformer", "wte", "embedding"), P('mp' , __A )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__A , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__A , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: str = _get_partition_rules()
snake_case: str = _replacement_rules(__A )
snake_case: int = {k: _unmatched for k in flatten_dict(__A )}
snake_case: List[Any] = {k: replace(__A , __A ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(__A ) )
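# Added usage sketch (an assumption, not part of the original file): the last function above
# flattens a parameter pytree, matches every key path against the regex rules, and returns a
# frozen pytree of PartitionSpec leaves with the same structure, e.g.
#
#   param_specs = lowerCAmelCase_(model.params)
#
# Such a spec tree is typically handed to pjit-style partitioning so attention and MLP kernels
# are sharded along the 'mp' (model-parallel) mesh axis while layer norms and biases (spec
# None) stay replicated.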
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = tempfile.mkdtemp()
snake_case: Optional[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case: Optional[int] = {
'do_resize': True,
'size': {'height': 2_24, 'width': 2_24},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: Union[str, Any] = self.get_rust_tokenizer()
snake_case: Union[str, Any] = self.get_image_processor()
snake_case: List[str] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case: List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
snake_case: Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_image_processor()
snake_case: Tuple = self.get_tokenizer()
snake_case: Optional[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.prepare_image_inputs()
snake_case: List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: Optional[int] = self.get_tokenizer()
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Tuple = self.prepare_image_inputs()
snake_case: Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case: int = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = 'Alexandra,T-shirt的价格是15便士。'
snake_case: List[Any] = self.prepare_image_inputs()
snake_case: Dict = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: int = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
snake_case: str = Dataset.from_dict(__A )
return dataset
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = get_dataset()
snake_case: Any = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = get_dataset()
snake_case: List[Any] = deduplicate_dataset(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
print(SCREAMING_SNAKE_CASE__ )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "swinv2"
__UpperCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: int = image_size
snake_case: Union[str, Any] = patch_size
snake_case: List[str] = num_channels
snake_case: Tuple = embed_dim
snake_case: str = depths
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = num_heads
snake_case: Optional[int] = window_size
snake_case: Any = mlp_ratio
snake_case: Optional[int] = qkv_bias
snake_case: Union[str, Any] = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: Dict = drop_path_rate
snake_case: List[str] = hidden_act
snake_case: int = use_absolute_embeddings
snake_case: Any = layer_norm_eps
snake_case: Dict = initializer_range
snake_case: List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case: Tuple = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
        snake_case: Union[str, Any] = (0, 0, 0, 0)
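        # Added note: with the defaults above (embed_dim=96, four stages), the derived
        # hidden_size works out to 96 * 2 ** 3 = 768, which is the channel dimension
        # consumed by downstream heads such as VisionEncoderDecoderModel.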
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def _UpperCamelCase ( self ):
'''simple docstring'''
        raise NotImplementedError()
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase = os.path.join(git_repo_path, "src", "transformers")
__UpperCAmelCase = "\n{0} = None\n"
__UpperCAmelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__UpperCAmelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tokenizers' )
snake_case: List[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tensorflow_text' )
snake_case: int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers' )
snake_case: Optional[Any] = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tensorflow_text' )
snake_case: Dict = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers_and_vision' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE__ )
self.assertIn('tensorflow_text' , SCREAMING_SNAKE_CASE__ )
self.assertIn('sentencepiece_and_tokenizers' , SCREAMING_SNAKE_CASE__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '\nCONSTANT = None\n' )
snake_case: Any = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case: Optional[int] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case: Tuple = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case: Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE__ ) | 692 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__UpperCAmelCase = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case: List[Any] = WATERMARK_BITS
snake_case: Any = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
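        # images smaller than 256 px are too small to carry the watermark, so return them unchanged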
if images.shape[-1] < 2_56:
return images
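        # rescale from [-1, 1] to [0, 255] and move channels last for the watermark encoder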
snake_case: str = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case: str = [self.encoder.encode(SCREAMING_SNAKE_CASE__ , 'dwtDct' ) for image in images]
snake_case: List[Any] = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__ ) ).permute(0 , 3 , 1 , 2 )
snake_case: Union[str, Any] = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0 )
return images | 720 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = question_encoder
snake_case: Union[str, Any] = generator
snake_case: Optional[int] = self.question_encoder
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'question_encoder_tokenizer' )
snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , 'generator_tokenizer' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE__ )
@classmethod
def _UpperCamelCase ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case: int = kwargs.pop('config' , SCREAMING_SNAKE_CASE__ )
if config is None:
snake_case: str = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
snake_case: Dict = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
def __call__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.current_tokenizer(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.generator.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.question_encoder
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.generator
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "longest" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , SCREAMING_SNAKE_CASE__ , )
if max_length is None:
snake_case: Optional[Any] = self.current_tokenizer.model_max_length
snake_case: int = self(
SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case: Any = self.current_tokenizer.model_max_length
snake_case: List[str] = self(
text_target=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: Dict = labels['input_ids']
return model_inputs | 692 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if n == 1 or not isinstance(__A , __A ):
return 0
elif n == 2:
return 1
else:
snake_case: str = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
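    # walk up the Fibonacci sequence until a term with at least n digits appears and return its index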
snake_case: str = 0
snake_case: Any = 2
while digits < n:
index += 1
snake_case: Any = len(str(fibonacci(__A ) ) )
return index
def lowerCAmelCase_ ( __A : int = 10_00 ):
'''simple docstring'''
return fibonacci_digits_index(__A )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 721 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
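        # the fixture path is None when the optional compression library is not installed, so skip with its reason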
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case , *snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__A , __A , clobber=__A )
with pytest.warns(__A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__A ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
) | 692 | 0 |
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
) | 700 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__UpperCamelCase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__UpperCamelCase = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the training data."} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
__UpperCamelCase = field(default=snake_case , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
snake_case: str = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
snake_case: Optional[Any] = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case: str = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case: Tuple = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case: List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case: Optional[int] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case: Tuple = data_args.train_file.split('.' )[-1]
snake_case: Union[str, Any] = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case: Union[str, Any] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
snake_case: List[Any] = load_dataset('csv' , data_files=__A , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case: Optional[Any] = load_dataset('json' , data_files=__A , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case: Tuple = raw_datasets['train'].features['label'].names
snake_case: List[str] = len(__A )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case: Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case: List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__A , )
snake_case: Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case: int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case: Union[str, Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case: Optional[Any] = {'Refused': 0, 'Entailed': 1}
snake_case: List[Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case: List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__A : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(__A : Dict ):
snake_case: str = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
snake_case: List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
snake_case: str = examples['statement']
snake_case: int = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
snake_case: List[Any] = tokenizer(__A , __A , padding=__A , max_length=__A , truncation=__A )
snake_case: List[Any] = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
snake_case: int = raw_datasets.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case: List[str] = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case: Tuple = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case: Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case: Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
snake_case: str = raw_datasets['test']
if data_args.max_predict_samples is not None:
snake_case: List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(__A : EvalPrediction ):
snake_case: int = p.predictions[0] if isinstance(p.predictions , __A ) else p.predictions
snake_case: List[str] = np.argmax(__A , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case: str = default_data_collator
elif training_args.fpaa:
snake_case: List[str] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
snake_case: List[Any] = None
# Initialize our Trainer
snake_case: List[str] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
snake_case: Optional[int] = None
if training_args.resume_from_checkpoint is not None:
snake_case: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case: Optional[Any] = last_checkpoint
snake_case: Union[str, Any] = trainer.train(resume_from_checkpoint=__A )
snake_case: List[Any] = train_result.metrics
snake_case: List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
snake_case: Optional[Any] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __A )
trainer.save_metrics('train' , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case: Dict = trainer.evaluate(eval_dataset=__A )
snake_case: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
snake_case: Dict = min(__A , len(__A ) )
trainer.log_metrics('eval' , __A )
trainer.save_metrics('eval' , __A )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Remove the `label` column because it contains -1 and Trainer won't like that.
snake_case: Optional[int] = predict_dataset.remove_columns('label' )
snake_case: str = trainer.predict(__A , metric_key_prefix='predict' ).predictions
snake_case: Any = np.argmax(__A , axis=1 )
snake_case: int = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__A ):
snake_case: int = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case: Optional[int] = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 692 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 701 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( __A : float = 0.1 ):
'''simple docstring'''
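    # count the primes along the diagonals of an odd-sided number spiral, ring by ring,
    # and return the side length at which primes fall below the given ratio of all diagonal values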
snake_case: Optional[int] = 3
snake_case: int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__A )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
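        # the SentencePiece processor is not picklable, so drop it and keep its serialized proto instead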
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 702 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: int = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
snake_case: Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case: List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE__ ) )
snake_case: str = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
snake_case: Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
snake_case: Tuple = toks + toks
# toks_str = [t[1] for t in toks]
snake_case: Dict = [t[0] for t in toks]
# Ensure consistency
snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case: str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
snake_case: Tuple = ' ' + output_txt
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
snake_case: List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.ta_base_tokenizer
snake_case: Union[str, Any] = 'Unicode €.'
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'Unicode €.</s>' )
snake_case: List[Any] = tokenizer('e è é ê ë' )
snake_case: Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE__ )
# decoding
snake_case: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.ta_base_tokenizer
snake_case: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case: Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
snake_case: str = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
snake_case: Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case: Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.ta_base_tokenizer
snake_case: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.ta_base_tokenizer
snake_case: str = [
'Summary of the text.',
'Another summary.',
]
snake_case: Dict = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.ta_base_tokenizer
snake_case: Optional[int] = ['A long paragraph for summarization. </s>']
snake_case: str = ['Summary of the text. </s>']
# fmt: off
snake_case: str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
snake_case: Optional[int] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['input_ids'][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , batch['labels'][0] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
snake_case: Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: Union[str, Any] = tempfile.mkdtemp()
snake_case: Dict = ' He is very happy, UNwant\u00E9d,running'
snake_case: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Any = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case: List[str] = tempfile.mkdtemp()
snake_case: str = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case: List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case: int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
snake_case: Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
snake_case: Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
snake_case: str = json.load(SCREAMING_SNAKE_CASE__ )
snake_case: int = [F"""<extra_id_{i}>""" for i in range(1_25 )]
snake_case: Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case: str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case: Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case: Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE__ )]
snake_case: Union[str, Any] = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
snake_case: List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case: Dict = 0
snake_case: List[Any] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , attr + '_id' , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ , attr + '_id' ) , SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [] )
setattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 692 | 0 |
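# Illustrative sketch (not from the test file above; `tok` and `tmp_dir` are assumed names):
# the save/reload round-trip exercised by these tests looks like this for any HF tokenizer:
#   tok.add_special_tokens({"additional_special_tokens": ["<my_token>"]})
#   tok.save_pretrained(tmp_dir)
#   reloaded = tok.__class__.from_pretrained(tmp_dir)
#   assert "<my_token>" in reloaded.additional_special_tokens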
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.' , FutureWarning , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) | 703 |
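# Minimal sketch (assumes the real transformers class and that it is default-constructible):
# a deprecation shim like the one above keeps the old name importable while warning once built.
#   import warnings
#   from transformers import DPTFeatureExtractor
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       DPTFeatureExtractor()
#   assert any("deprecated" in str(w.message) for w in caught)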
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = only_cross_attention
snake_case: Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
snake_case: Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case: List[str] = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case: str = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case: Tuple = (
AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = Attention(
query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none
else:
snake_case: int = None
snake_case: Tuple = None
# 3. Feed-forward
snake_case: Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ )
# let chunk size default to None
snake_case: Any = None
snake_case: Any = 0
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = chunk_size
snake_case: str = dim
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
snake_case: Optional[int] = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.use_ada_layer_norm_zero:
snake_case , snake_case , snake_case , snake_case , snake_case: int = self.norma(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype )
else:
snake_case: List[str] = self.norma(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case: List[str] = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.use_ada_layer_norm_zero:
snake_case: Tuple = gate_msa.unsqueeze(1 ) * attn_output
snake_case: List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case: Dict = (
self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ )
)
snake_case: Any = self.attna(
SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: List[str] = attn_output + hidden_states
# 3. Feed-forward
snake_case: str = self.norma(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
snake_case: List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case: Optional[Any] = torch.cat(
[self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case: int = self.ff(SCREAMING_SNAKE_CASE__ )
if self.use_ada_layer_norm_zero:
snake_case: Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case: Tuple = ff_output + hidden_states
return hidden_states
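# Illustrative check (not from the module above): the feed-forward chunking used in the block
# is a memory optimisation only; because the FF is applied position-wise, chunking along the
# sequence dimension is numerically equivalent (up to float rounding) to a single pass:
#   import torch
#   x = torch.randn(2, 8, 16)
#   ff = torch.nn.Linear(16, 16)
#   full = ff(x)
#   chunked = torch.cat([ff(c) for c in x.chunk(4, dim=1)], dim=1)
#   assert torch.allclose(full, chunked, atol=1e-6)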
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ):
'''simple docstring'''
super().__init__()
snake_case: int = int(dim * mult )
snake_case: Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case: int = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if activation_fn == "gelu-approximate":
snake_case: Optional[Any] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate='tanh' )
elif activation_fn == "geglu":
snake_case: List[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif activation_fn == "geglu-approximate":
snake_case: Optional[int] = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.ModuleList([] )
# project in
self.net.append(SCREAMING_SNAKE_CASE__ )
# project dropout
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
# project out
self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for module in self.net:
snake_case: Optional[int] = module(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ):
'''simple docstring'''
super().__init__()
snake_case: Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = approximate
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.proj(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = self.gelu(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case , snake_case: int = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(SCREAMING_SNAKE_CASE__ )
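# Illustrative shape-level sketch (not from the module above): GEGLU projects to twice the
# hidden width, splits into a value half and a gate half, and returns value * GELU(gate):
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(2, 4, 8)
#   proj = torch.nn.Linear(8, 2 * 16)
#   value, gate = proj(x).chunk(2, dim=-1)
#   out = value * F.gelu(gate)        # shape (2, 4, 16)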
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = self.proj(SCREAMING_SNAKE_CASE__ )
return x * torch.sigmoid(1.7_02 * x )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Optional[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = nn.SiLU()
snake_case: Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 )
snake_case: int = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case: Dict = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 )
snake_case: str = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.SiLU()
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
snake_case: int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) )
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case: str = emb.chunk(6 , dim=1 )
snake_case: Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ):
'''simple docstring'''
super().__init__()
snake_case: str = num_groups
snake_case: str = eps
if act_fn is None:
snake_case: Dict = None
else:
snake_case: List[str] = get_activation(SCREAMING_SNAKE_CASE__ )
snake_case: Any = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.act:
snake_case: Optional[Any] = self.act(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.linear(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = emb[:, :, None, None]
snake_case , snake_case: List[Any] = emb.chunk(2 , dim=1 )
snake_case: Any = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps )
snake_case: Optional[int] = x * (1 + scale) + shift
return x | 692 | 0 |
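# Illustrative check (not from the module above): the Ada*/group norms share one pattern,
# a conditioning embedding projected to (scale, shift) and applied as norm(x) * (1 + scale) + shift,
# so zero scale and shift recover the unmodulated norm:
#   import torch
#   x = torch.randn(2, 4, 8, 8)
#   normed = torch.nn.functional.group_norm(x, 2)
#   scale = shift = torch.zeros(2, 4, 1, 1)
#   assert torch.allclose(normed * (1 + scale) + shift, normed)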
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = ["image_processor"]
__UpperCamelCase = "SamImageProcessor"
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: int = self.image_processor
snake_case: List[Any] = -10
snake_case: List[Any] = self.image_processor.size['longest_edge']
def __call__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
        # pop arguments that are not used in the forward pass but are used nevertheless
snake_case: Union[str, Any] = encoding_image_processor['original_sizes']
if hasattr(SCREAMING_SNAKE_CASE__ , 'numpy' ): # Checks if Torch or TF tensor
snake_case: Union[str, Any] = original_sizes.numpy()
snake_case: List[str] = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , input_boxes=SCREAMING_SNAKE_CASE__ , )
snake_case: str = self._normalize_and_convert(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , input_boxes=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , )
return encoding_image_processor
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="pt" , ):
'''simple docstring'''
if input_points is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
snake_case: Any = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , original_sizes[0] ) for point in input_points
]
else:
snake_case: str = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for point, original_size in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
snake_case: str = self._pad_points_and_labels(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ )
if input_labels is not None:
snake_case: Optional[int] = np.array(SCREAMING_SNAKE_CASE__ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
snake_case: Dict = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE__ )
for box in input_boxes
]
else:
snake_case: List[Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , is_bounding_box=SCREAMING_SNAKE_CASE__ )
for box, original_size in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
snake_case: int = np.array(SCREAMING_SNAKE_CASE__ )
if input_boxes is not None:
if return_tensors == "pt":
snake_case: Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# boxes batch size of 1 by default
snake_case: Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
snake_case: List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# boxes batch size of 1 by default
snake_case: Tuple = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
snake_case: List[str] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
snake_case: List[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
snake_case: Optional[Any] = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
snake_case: str = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
snake_case: Tuple = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
snake_case: Union[str, Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
snake_case: Optional[Any] = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = max([point.shape[0] for point in input_points] )
snake_case: Union[str, Any] = []
for i, point in enumerate(SCREAMING_SNAKE_CASE__ ):
if point.shape[0] != expected_nb_points:
snake_case: Tuple = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
snake_case: Tuple = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = processed_input_points
return input_points, input_labels
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
snake_case: Union[str, Any] = original_size
snake_case: str = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE__ , longest_edge=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = deepcopy(SCREAMING_SNAKE_CASE__ ).astype(SCREAMING_SNAKE_CASE__ )
if is_bounding_box:
snake_case: Optional[Any] = coords.reshape(-1 , 2 , 2 )
snake_case: List[str] = coords[..., 0] * (new_w / old_w)
snake_case: List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
snake_case: str = coords.reshape(-1 , 4 )
return coords
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ):
'''simple docstring'''
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , 'numpy' ): # Checks for TF or Torch tensor
snake_case: Any = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE__ ):
raise ValueError('Input points must be a list of list of floating points.' )
snake_case: str = [np.array(SCREAMING_SNAKE_CASE__ ) for input_point in input_points]
else:
snake_case: str = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , 'numpy' ):
snake_case: Tuple = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE__ ):
                raise ValueError('Input labels must be a list of list of integers.' )
snake_case: List[str] = [np.array(SCREAMING_SNAKE_CASE__ ) for label in input_labels]
else:
snake_case: Any = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , 'numpy' ):
snake_case: List[str] = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE__ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE__ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
snake_case: Union[str, Any] = [np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) for box in input_boxes]
else:
snake_case: Any = None
return input_points, input_labels, input_boxes
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 704 |
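# Worked illustration (not from the processor above; values chosen for the example):
# _normalize_coordinates rescales prompt points from the original image size to the resized
# longest-edge size, i.e. coord * new_size / old_size:
#   old_h, old_w, longest_edge = 1024, 2048, 1024
#   scale = longest_edge / max(old_h, old_w)                        # 0.5
#   new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
#   x, y = 300, 150
#   print(x * (new_w / old_w), y * (new_h / old_h))                 # 150.0 75.0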
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = RoCBertTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = filter_non_english
def _UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
snake_case: Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
snake_case: List[Any] = {}
snake_case: List[str] = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = i
snake_case: Union[str, Any] = i
snake_case: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: Dict = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
snake_case: Union[str, Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: str = i
snake_case: Optional[int] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
snake_case: int = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _UpperCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case: List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
snake_case: Optional[int] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
snake_case: int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = ['的', '人', '有']
snake_case: Any = ''.join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case: Tuple = True
snake_case: List[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = False
snake_case: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: int = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case: Union[str, Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case: int = tokenizer.encode('你好' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Any = tokenizer.encode('你是谁' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
snake_case: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case: Dict = '你好,你是谁'
snake_case: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) | 692 | 0 |
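# Aside (not from the test file above; `tok` is a hypothetical fast tokenizer): the
# offset-mapping assertions rely on fast tokenizers returning a (start, end) character span
# per token, with (0, 0) for special tokens:
#   enc = tok("A, naïve sentence.", return_offsets_mapping=True, add_special_tokens=True)
#   enc["offset_mapping"][0]    # (0, 0) for the leading [CLS]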
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ConsistencyModelPipeline
__UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__UpperCamelCase = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if class_cond:
snake_case: Dict = self.dummy_cond_unet
else:
snake_case: List[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
snake_case: Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
snake_case: Any = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
snake_case: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: int = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: Union[str, Any] = self.get_dummy_components()
snake_case: Tuple = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: Any = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: str = image[0, -3:, -3:, -1]
snake_case: List[str] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: Any = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = 0
snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: Optional[Any] = image[0, -3:, -3:, -1]
snake_case: Optional[int] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: List[Any] = self.get_dummy_components()
snake_case: Dict = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = 1
snake_case: Dict = None
snake_case: Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: str = image[0, -3:, -3:, -1]
snake_case: List[Any] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case: Optional[int] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
snake_case: int = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 1
snake_case: int = None
snake_case: Optional[int] = 0
snake_case: Any = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
snake_case: List[str] = image[0, -3:, -3:, -1]
snake_case: Optional[int] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="cpu" , SCREAMING_SNAKE_CASE__=torch.floataa , SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ):
'''simple docstring'''
snake_case: str = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
snake_case: Any = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = latents
return inputs
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="cpu" , SCREAMING_SNAKE_CASE__=torch.floataa , SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ):
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE__ ) == str:
snake_case: Optional[int] = torch.device(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
return latents
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Optional[int] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_inputs()
snake_case: Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: Tuple = image[0, -3:, -3:, -1]
snake_case: Optional[Any] = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Optional[Any] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_inputs()
snake_case: Union[str, Any] = 1
snake_case: List[str] = None
snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: Optional[int] = image[0, -3:, -3:, -1]
snake_case: str = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: str = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
snake_case: List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: Union[str, Any] = image[0, -3:, -3:, -1]
snake_case: Dict = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
snake_case: List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
snake_case: Tuple = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: str = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = 1
snake_case: Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
snake_case: List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
snake_case: List[str] = image[0, -3:, -3:, -1]
snake_case: Union[str, Any] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 705 |
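# Illustrative note (not from the test file above): each pipeline test compares the 3x3
# bottom-right corner of the last channel of the generated image against a stored reference:
#   import numpy as np
#   image, expected_slice = np.zeros((1, 64, 64, 3)), np.zeros(9)
#   corner = image[0, -3:, -3:, -1]                   # shape (3, 3)
#   assert np.abs(corner.flatten() - expected_slice).max() < 1e-3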
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__UpperCAmelCase = 6378137.0
__UpperCAmelCase = 6356752.314245
__UpperCAmelCase = 6_378_137
def lowerCAmelCase_ ( __A : float , __A : float , __A : float , __A : float ):
'''simple docstring'''
snake_case: Optional[Any] = (AXIS_A - AXIS_B) / AXIS_A
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
snake_case: Tuple = radians(__A )
snake_case: Tuple = radians(__A )
# Equation
snake_case: List[Any] = sin((phi_a - phi_a) / 2 )
snake_case: Dict = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
snake_case: Union[str, Any] = sqrt(sin_sq_phi + (cos(__A ) * cos(__A ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__A )
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
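# Worked check (not from the module above): for two equatorial points one degree of longitude
# apart, the parametric-latitude correction vanishes, so the formula reduces to the spherical
# haversine and gives roughly 111.32 km per degree:
#   from math import asin, radians, sin
#   print(round(2 * 6_378_137 * asin(sin(radians(1.0) / 2))))    # 111319 (metres)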
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "timesformer"
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-6 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="divided_space_time" , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = image_size
snake_case: Tuple = patch_size
snake_case: int = num_channels
snake_case: int = num_frames
snake_case: Tuple = hidden_size
snake_case: Optional[int] = num_hidden_layers
snake_case: Any = num_attention_heads
snake_case: Tuple = intermediate_size
snake_case: List[str] = hidden_act
snake_case: int = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: List[str] = initializer_range
snake_case: int = layer_norm_eps
snake_case: str = qkv_bias
snake_case: Optional[int] = attention_type
snake_case: List[str] = drop_path_rate | 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
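# Submodules are registered lazily: each try/except block below only adds classes to the import
# structure when the matching backend (tokenizers, PyTorch, TensorFlow or Flax) is installed.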
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 692 | 0 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__UpperCAmelCase = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
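# These are non-Python sources (CUDA kernels, C++ ops, Cython modules) that packaging can silently
# drop from a release, so the script fails loudly if any of them is missing from the build.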
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
__UpperCAmelCase = parser.parse_args()
if args.check_lib:
__UpperCAmelCase = importlib.import_module("transformers")
__UpperCAmelCase = Path(transformers_module.__file__).parent
else:
__UpperCAmelCase = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!") | 707 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
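# Converts an original Donut checkpoint into a VisionEncoderDecoderModel (Donut-Swin encoder plus
# MBart decoder) and sanity-checks embeddings, encoder states and logits against the original model.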
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Tuple = model.config
snake_case: str = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
snake_case: Optional[Any] = MBartConfig(
is_decoder=__A , is_encoder_decoder=__A , add_cross_attention=__A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__A , add_final_layer_norm=__A , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __A : int ):
'''simple docstring'''
if "encoder.model" in name:
snake_case: Optional[Any] = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
snake_case: str = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
snake_case: Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
snake_case: Tuple = 'encoder.' + name
if "attn.proj" in name:
snake_case: Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
snake_case: Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
snake_case: Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
snake_case: Dict = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
snake_case: int = 'encoder.layernorm.bias'
return name
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case: List[Any] = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case: Union[str, Any] = key.split('.' )
snake_case: Optional[Any] = int(key_split[3] )
snake_case: Any = int(key_split[5] )
snake_case: Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case: Union[str, Any] = val[:dim, :]
snake_case: Any = val[dim : dim * 2, :]
snake_case: List[str] = val[-dim:, :]
else:
snake_case: str = val[:dim]
snake_case: Union[str, Any] = val[dim : dim * 2]
snake_case: List[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
snake_case: Optional[int] = val
return orig_state_dict
def lowerCAmelCase_ ( __A : List[Any] , __A : Any=None , __A : List[str]=False ):
'''simple docstring'''
snake_case: str = DonutModel.from_pretrained(__A ).eval()
# load HuggingFace model
snake_case , snake_case: Optional[Any] = get_configs(__A )
snake_case: Optional[int] = DonutSwinModel(__A )
snake_case: Tuple = MBartForCausalLM(__A )
snake_case: Optional[Any] = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
snake_case: Optional[int] = original_model.state_dict()
snake_case: Optional[int] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# verify results on scanned document
snake_case: Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
snake_case: str = dataset['test'][0]['image'].convert('RGB' )
snake_case: Optional[int] = XLMRobertaTokenizerFast.from_pretrained(__A , from_slow=__A )
snake_case: Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
snake_case: Dict = DonutProcessor(__A , __A )
snake_case: Optional[Any] = processor(__A , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
snake_case: int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
snake_case: Optional[Any] = 'When is the coffee break?'
snake_case: Optional[int] = task_prompt.replace('{user_input}' , __A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
snake_case: Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
snake_case: str = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        snake_case: str = '<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
snake_case: int = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a generic placeholder prompt
snake_case: Optional[Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
snake_case: Optional[int] = original_model.decoder.tokenizer(__A , add_special_tokens=__A , return_tensors='pt' )[
'input_ids'
]
snake_case: Any = original_model.encoder.model.patch_embed(__A )
snake_case , snake_case: Dict = model.encoder.embeddings(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
# verify encoder hidden states
snake_case: Tuple = original_model.encoder(__A )
snake_case: List[str] = model.encoder(__A ).last_hidden_state
assert torch.allclose(__A , __A , atol=1E-2 )
# verify decoder hidden states
snake_case: List[Any] = original_model(__A , __A , __A ).logits
snake_case: List[Any] = model(__A , decoder_input_ids=__A ).logits
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__UpperCAmelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 692 | 0 |
__UpperCAmelCase = range(2, 20 + 1)
__UpperCAmelCase = [10**k for k in range(ks[-1] + 1)]
__UpperCAmelCase = {}
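# Each term of the sequence is the previous term plus its own digit sum; the helpers below memoise
# "jumps" over the trailing k digits so that very large indices (the default target is 10**15,
# apparently Project Euler's "Sum of digits sequence" problem) stay tractable.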
def lowerCAmelCase_ ( __A : Tuple , __A : Tuple , __A : Optional[Any] , __A : str ):
'''simple docstring'''
snake_case: List[str] = sum(a_i[j] for j in range(__A , len(__A ) ) )
snake_case: int = sum(a_i[j] * base[j] for j in range(min(len(__A ) , __A ) ) )
snake_case: Any = 0, 0
snake_case: Any = n - i
snake_case: Dict = memo.get(__A )
if sub_memo is not None:
snake_case: str = sub_memo.get(__A )
if jumps is not None and len(__A ) > 0:
# find and make the largest jump without going over
snake_case: Dict = -1
for _k in range(len(__A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case: Optional[int] = _k
break
if max_jump >= 0:
snake_case: List[str] = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case: Optional[int] = diff + c
for j in range(min(__A , len(__A ) ) ):
snake_case: List[str] = divmod(__A , 10 )
if new_c > 0:
add(__A , __A , __A )
else:
snake_case: Optional[int] = []
else:
snake_case: Dict = {c: []}
snake_case: str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case: Any = next_term(__A , k - 1 , i + dn , __A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case: List[Any] = compute(__A , __A , i + dn , __A )
diff += _diff
dn += terms_jumped
snake_case: Optional[int] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case: str = 0
while j < len(__A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__A , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( __A : Optional[int] , __A : Optional[int] , __A : Optional[int] , __A : Dict ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__A ):
a_i.extend([0 for _ in range(k - len(__A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case: str = i
snake_case: Union[str, Any] = 0, 0, 0
for j in range(len(__A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case: Optional[int] = ds_c + ds_b
diff += addend
snake_case: Tuple = 0
for j in range(__A ):
snake_case: Optional[Any] = a_i[j] + addend
snake_case: str = divmod(__A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__A , __A , __A )
return diff, i - start_i
def lowerCAmelCase_ ( __A : List[Any] , __A : Union[str, Any] , __A : Any ):
'''simple docstring'''
for j in range(__A , len(__A ) ):
snake_case: int = digits[j] + addend
if s >= 10:
snake_case: Optional[int] = divmod(__A , 10 )
snake_case: Optional[Any] = addend // 10 + quotient
else:
snake_case: Dict = s
snake_case: str = addend // 10
if addend == 0:
break
while addend > 0:
snake_case: Dict = divmod(__A , 10 )
digits.append(__A )
def lowerCAmelCase_ ( __A : int = 10**15 ):
'''simple docstring'''
snake_case: List[str] = [1]
snake_case: List[str] = 1
snake_case: int = 0
while True:
snake_case: Tuple = next_term(__A , 20 , i + dn , __A )
dn += terms_jumped
if dn == n - i:
break
snake_case: Tuple = 0
for j in range(len(__A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }') | 708 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
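# The generic tensor helpers (flatten_dict, transpose, reshape, squeeze, expand_dims) are checked
# against NumPy first and then, when the backend is installed, against torch, TensorFlow and JAX.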
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
snake_case: Union[str, Any] = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = np.random.randn(3 , 4 )
snake_case: Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(3 , 4 , 5 )
snake_case: Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Dict = np.random.randn(3 , 4 , 5 )
snake_case: str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Optional[int] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
snake_case: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: List[str] = np.random.randn(3 , 4 , 5 )
snake_case: Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = np.random.randn(3 , 4 )
snake_case: Tuple = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
snake_case: Any = np.random.randn(3 , 4 , 5 )
snake_case: List[str] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(1 , 3 , 4 )
snake_case: List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: int = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = np.random.randn(1 , 3 , 4 )
snake_case: Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
snake_case: Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(1 , 3 , 4 )
snake_case: List[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case: Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case: Tuple = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = np.random.randn(3 , 4 )
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = np.random.randn(3 , 4 )
snake_case: Any = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = np.random.randn(3 , 4 )
snake_case: int = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) ) | 692 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
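# The tester below builds a small TransfoXL configuration and checks output shapes, cached memories
# and the sequence-classification head of the TensorFlow implementation.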
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: str = parent
snake_case: Tuple = 13
snake_case: List[Any] = 7
snake_case: Tuple = 30
snake_case: Dict = self.seq_length + self.mem_len
snake_case: Any = 15
snake_case: Optional[int] = True
snake_case: Any = True
snake_case: List[Any] = 99
snake_case: Union[str, Any] = [10, 50, 80]
snake_case: Any = 32
snake_case: int = 32
snake_case: str = 4
snake_case: List[Any] = 8
snake_case: Any = 1_28
snake_case: Optional[Any] = 2
snake_case: Union[str, Any] = 2
snake_case: Dict = None
snake_case: List[str] = 1
snake_case: List[Any] = 0
snake_case: Dict = 3
snake_case: Optional[Any] = self.vocab_size - 1
snake_case: Optional[int] = 0.01
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: str = None
if self.use_labels:
snake_case: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: int = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: List[Any] = TFTransfoXLModel(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
snake_case: Dict = {'input_ids': input_ids_a, 'mems': mems_a}
snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
snake_case: Dict = {'input_ids': input_ids_a, 'labels': lm_labels}
snake_case: int = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
snake_case: Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
snake_case: Optional[Any] = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
snake_case: str = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = self.prepare_config_and_inputs()
(snake_case): List[str] = config_and_inputs
snake_case: List[str] = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__UpperCamelCase = () if is_tf_available() else ()
__UpperCamelCase = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = TFTransfoXLModelTester(self )
snake_case: Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , d_embed=37 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
'''simple docstring'''
self.model_tester.set_seed()
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.model_tester.set_seed()
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
snake_case: List[Any] = model.get_output_embeddings()
assert isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Layer )
snake_case: Any = model.get_bias()
assert name is None
else:
snake_case: List[str] = model.get_output_embeddings()
assert x is None
snake_case: Optional[int] = model.get_bias()
assert name is None
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: str = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
snake_case: Dict = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
snake_case: Any = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
snake_case: Tuple = model.generate(SCREAMING_SNAKE_CASE__ , max_length=2_00 , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE__ ) | 709 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "PoolFormerConfig"
# Base docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = [1, 512, 7, 7]
# Image classification docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = "tabby, tabby cat"
__UpperCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( __A : Tuple , __A : float = 0.0 , __A : bool = False ):
'''simple docstring'''
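    # Stochastic depth ("drop path"): entire residual branches are zeroed per sample with probability
    # drop_prob during training, and surviving samples are rescaled by 1 / keep_prob to keep the expectation.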
if drop_prob == 0.0 or not training:
return input
snake_case: Union[str, Any] = 1 - drop_prob
snake_case: List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case: List[Any] = keep_prob + torch.rand(__A , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case: Any = input.div(__A ) * random_tensor
return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = drop_prob
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training )
def _UpperCamelCase ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: List[str] = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride)
snake_case: Union[str, Any] = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding)
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.projection(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.norm(SCREAMING_SNAKE_CASE__ )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: str = nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
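        # Returning pool(x) - x lets the surrounding residual connection reduce this token mixer to plain
        # average pooling; PoolFormer uses it in place of self-attention.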
return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: str = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = ACTaFN[config.hidden_act]
else:
snake_case: int = config.hidden_act
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.act_fn(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.drop(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.drop(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = PoolFormerPooling(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
        # Stochastic depth (drop path) is useful when training deeper networks
snake_case: Union[str, Any] = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity()
snake_case: Optional[Any] = config.use_layer_scale
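        # LayerScale: learnable per-channel weights, initialised to a small value, that scale each
        # residual branch and help stabilise deeper models.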
if config.use_layer_scale:
snake_case: Any = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.use_layer_scale:
snake_case: str = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: str = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = ()
snake_case: Dict = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: Any = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = (output,) + outputs
return outputs
else:
snake_case: Optional[Any] = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) )
# First residual connection
snake_case: Union[str, Any] = pooling_output + hidden_states
snake_case: List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: Dict = hidden_states + layer_output
snake_case: Optional[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = config
# stochastic depth decay rule
snake_case: List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: List[Any] = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
# Transformer blocks
snake_case: str = []
snake_case: int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
snake_case: str = () if output_hidden_states else None
snake_case: Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: Dict = layers
# Get patch embeddings from hidden_states
snake_case: int = embedding_layer(SCREAMING_SNAKE_CASE__ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = blk(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = layer_outputs[0]
if output_hidden_states:
snake_case: List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = "poolformer"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = value
__UpperCAmelCase = R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__UpperCAmelCase = R"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = config
snake_case: Tuple = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: List[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Any = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.dense(SCREAMING_SNAKE_CASE__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = config.num_labels
snake_case: str = PoolFormerModel(SCREAMING_SNAKE_CASE__ )
# Final norm
snake_case: int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case: Optional[Any] = self.poolformer(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: Any = outputs[0]
snake_case: str = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) )
snake_case: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict = 'single_label_classification'
else:
snake_case: List[str] = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Union[str, Any] = MSELoss()
if self.num_labels == 1:
snake_case: List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: int = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
snake_case: Union[str, Any] = CrossEntropyLoss()
snake_case: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int = BCEWithLogitsLoss()
snake_case: Optional[int] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
snake_case: str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states ) | 692 | 0 |
__UpperCAmelCase = "Alexander Joslin"
import operator as op
from .stack import Stack
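# Dijkstra's two-stack algorithm: operands and operators go onto separate stacks, and every closing
# parenthesis pops one operator and two operands, so a fully parenthesised infix expression is
# evaluated in a single left-to-right pass.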
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: Any = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
snake_case: Stack[int] = Stack()
snake_case: Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
snake_case: Optional[Any] = operator_stack.peek()
operator_stack.pop()
snake_case: List[Any] = operand_stack.peek()
operand_stack.pop()
snake_case: str = operand_stack.peek()
operand_stack.pop()
snake_case: Any = operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__UpperCAmelCase = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}') | 710 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
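# Bidirectional Dijkstra: one search expands from the source over the forward graph while another
# expands from the destination over the reversed graph; the best distance seen where the two
# frontiers meet is returned once neither side can improve it.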
def lowerCAmelCase_ ( __A : dict , __A : str , __A : set , __A : set , __A : dict , __A : dict , __A : PriorityQueue , __A : dict , __A : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case: Any = cst_fwd.get(__A , np.inf )
snake_case: int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case: Union[str, Any] = new_cost_f
snake_case: Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case: List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase_ ( __A : str , __A : str , __A : dict , __A : dict ):
'''simple docstring'''
snake_case: Optional[Any] = -1
snake_case: Any = set()
snake_case: str = set()
snake_case: int = {source: 0}
snake_case: Dict = {destination: 0}
snake_case: int = {source: None}
snake_case: Union[str, Any] = {destination: None}
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: PriorityQueue[Any] = PriorityQueue()
snake_case: Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case , snake_case: List[str] = queue_forward.get()
visited_forward.add(__A )
snake_case , snake_case: int = queue_backward.get()
visited_backward.add(__A )
snake_case: str = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
snake_case: Optional[Any] = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case: Any = shortest_distance
return shortest_path_distance
__UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
__UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
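# Maps the LAVIS BLIP-2 weight names (ViT vision encoder, Q-Former, OPT or Flan-T5 language model)
# onto the Hugging Face BLIP-2 layout and verifies the converted model on a demo image.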
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Optional[int] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
snake_case: str = Image.open(requests.get(__A , stream=__A ).raw ).convert('RGB' )
return image
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case: List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( __A : str , __A : List[str] , __A : Dict ):
'''simple docstring'''
snake_case: List[Any] = dct.pop(__A )
snake_case: int = val
def lowerCAmelCase_ ( __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case: List[Any] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
snake_case: Dict = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
snake_case: Dict = torch.cat((q_bias, torch.zeros_like(__A , requires_grad=__A ), v_bias) )
snake_case: Union[str, Any] = qkv_bias
def lowerCAmelCase_ ( __A : Any , __A : Tuple ):
'''simple docstring'''
snake_case: str = 3_64 if 'coco' in model_name else 2_24
snake_case: Optional[Any] = BlipaVisionConfig(image_size=__A ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
snake_case: Any = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__A ).to_dict()
elif "opt-6.7b" in model_name:
snake_case: List[str] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__A ).to_dict()
elif "t5-xl" in model_name:
snake_case: Optional[int] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case: Optional[int] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
snake_case: Optional[Any] = BlipaConfig(vision_config=__A , text_config=__A )
return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( __A : List[str] , __A : str=None , __A : List[Any]=False ):
'''simple docstring'''
snake_case: Optional[Any] = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
snake_case: Optional[int] = tokenizer('\n' , add_special_tokens=__A ).input_ids[0]
snake_case: str = get_blipa_config(__A , eos_token_id=__A )
snake_case: Union[str, Any] = BlipaForConditionalGeneration(__A ).eval()
snake_case: List[Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
snake_case: Optional[int] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
snake_case: List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
snake_case: Union[str, Any] = load_model_and_preprocess(
name=__A , model_type=__A , is_eval=__A , device=__A )
original_model.eval()
print('Done!' )
# update state dict keys
snake_case: Optional[int] = original_model.state_dict()
snake_case: List[str] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case: Union[str, Any] = state_dict.pop(__A )
if key.startswith('Qformer.bert' ):
snake_case: str = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
snake_case: Optional[Any] = key.replace('self' , 'attention' )
if "opt_proj" in key:
snake_case: Tuple = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
snake_case: List[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
snake_case: List[Any] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
snake_case: List[Any] = key.replace('t5' , 'language' )
snake_case: Tuple = val
# read in qv biases
read_in_q_v_bias(__A , __A )
snake_case: Optional[int] = hf_model.load_state_dict(__A , strict=__A )
assert len(__A ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case: Union[str, Any] = load_demo_image()
snake_case: Tuple = vis_processors['eval'](__A ).unsqueeze(0 ).to(__A )
snake_case: Optional[Any] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__A )
# create processor
snake_case: Dict = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__A , image_std=__A )
snake_case: Any = BlipaProcessor(image_processor=__A , tokenizer=__A )
snake_case: str = processor(images=__A , return_tensors='pt' ).pixel_values.to(__A )
# make sure processor creates exact same pixel values
assert torch.allclose(__A , __A )
original_model.to(__A )
hf_model.to(__A )
with torch.no_grad():
if "opt" in model_name:
snake_case: Dict = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
snake_case: List[Any] = hf_model(__A , __A ).logits
else:
snake_case: List[str] = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
snake_case: Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
snake_case: str = hf_model(__A , __A , labels=__A ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case: List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=__A )
assert torch.allclose(logits[0, :3, :3] , __A , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
snake_case: str = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=__A )
else:
# cast to same type
snake_case: Tuple = logits.dtype
assert torch.allclose(original_logits.to(__A ) , __A , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
snake_case: Optional[int] = ''
snake_case: Union[str, Any] = tokenizer(__A , return_tensors='pt' ).input_ids.to(__A )
snake_case: List[Any] = original_model.generate({'image': original_pixel_values} )
snake_case: Optional[Any] = hf_model.generate(
__A , __A , do_sample=__A , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , __A )
snake_case: List[str] = input_ids.shape[1]
snake_case: Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__A )
snake_case: Any = [text.strip() for text in output_text]
print('HF generation:' , __A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__A )
hf_model.save_pretrained(__A )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__UpperCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 711 |
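# Illustrative sketch (not part of the original conversion script): the rename_key helper above just
# pops an entry out of the state dict and re-inserts its value under the new name, so no tensor data
# is copied.  A plain-dict demo of the same mechanic:
def rename_key_demo(state_dict, old_key, new_key):
    state_dict[new_key] = state_dict.pop(old_key)


demo_state = {"visual_encoder.cls_token": [0.0, 1.0]}
rename_key_demo(demo_state, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert "visual_encoder.cls_token" not in demo_state
assert demo_state["vision_model.embeddings.class_embedding"] == [0.0, 1.0]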
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,) | 692 | 0 |
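# Illustrative sketch (not part of the original tokenizer): the fairseq/spm alignment documented in
# the comments above pins the four special tokens manually and shifts every other SentencePiece id
# up by fairseq_offset (1), so "," at spm id 3 becomes fairseq id 4.  The spm ids below are a toy
# stand-in for a real SentencePiece model:
demo_fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
demo_fairseq_offset = 1
demo_spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4, "▁": 5}


def demo_token_to_id(token):
    if token in demo_fairseq_tokens_to_ids:
        return demo_fairseq_tokens_to_ids[token]
    return demo_spm_piece_to_id[token] + demo_fairseq_offset


assert demo_token_to_id(",") == 4
assert demo_token_to_id("<pad>") == 1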
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case: Union[str, Any] = filter(lambda __A : p.requires_grad , model.parameters() )
snake_case: List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCAmelCase = logging.getLogger(__name__)
def lowerCAmelCase_ ( __A : List[Any] , __A : Optional[int] ):
'''simple docstring'''
if metric == "rouge2":
snake_case: Union[str, Any] = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
snake_case: List[Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
snake_case: List[str] = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
snake_case: Any = ModelCheckpoint(
dirpath=__A , filename=__A , monitor=f"""val_{metric}""" , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase_ ( __A : Optional[Any] , __A : List[Any] ):
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=__A , verbose=__A , )
class SCREAMING_SNAKE_CASE ( pl.Callback ):
'''simple docstring'''
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
snake_case: Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
snake_case: Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
snake_case: Union[str, Any] = od / 'test_results.txt'
snake_case: Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
snake_case: Union[str, Any] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
snake_case: Union[str, Any] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , 'a+' ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE__ ):
if key in ["log", "progress_bar", "preds"]:
continue
snake_case: Union[str, Any] = metrics[key]
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
snake_case: Dict = val.item()
snake_case: Union[str, Any] = F"""{key}: {val:.6f}\n"""
writer.write(SCREAMING_SNAKE_CASE__ )
if not save_generations:
return
if "preds" in metrics:
snake_case: List[Any] = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
try:
snake_case: Union[str, Any] = pl_module.model.model.num_parameters()
except AttributeError:
snake_case: Tuple = pl_module.model.num_parameters()
snake_case: List[str] = count_trainable_parameters(SCREAMING_SNAKE_CASE__ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'test' )
@rank_zero_only
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 712 |
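# Illustrative sketch (not part of the original callbacks): the filename templates chosen above are
# plain format strings that ModelCheckpoint fills in from the logged metrics; formatting one by hand
# shows roughly what a saved checkpoint name would look like:
demo_template = "{val_avg_rouge2:.4f}-{step_count}"
print(demo_template.format(val_avg_rouge2=0.2134, step_count=500))  # -> 0.2134-500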
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase_ ( __A : Optional[Any] ):
'''simple docstring'''
return getitem, k
def lowerCAmelCase_ ( __A : Any , __A : Optional[int] ):
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase_ ( __A : List[str] ):
'''simple docstring'''
return delitem, k
def lowerCAmelCase_ ( __A : str , __A : int , *__A : Tuple ):
'''simple docstring'''
try:
return fun(__A , *__A ), None
except Exception as e:
return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( __A : str ):
'''simple docstring'''
snake_case: List[Any] = HashMap(initial_block_size=4 )
snake_case: List[Any] = {}
for _, (fun, *args) in enumerate(__A ):
snake_case , snake_case: Optional[int] = _run_operation(__A , __A , *__A )
snake_case , snake_case: str = _run_operation(__A , __A , *__A )
assert my_res == py_res
assert str(__A ) == str(__A )
assert set(__A ) == set(__A )
assert len(__A ) == len(__A )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
'''simple docstring'''
def is_public(__A : str ) -> bool:
return not name.startswith('_' )
snake_case: Dict = {name for name in dir({} ) if is_public(__A )}
snake_case: List[str] = {name for name in dir(HashMap() ) if is_public(__A )}
assert dict_public_names > hash_public_names | 692 | 0 |
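# Illustrative sketch (not part of the original test module): each operation tuple above is just an
# operator-module callable plus its arguments, applied in the same way to the HashMap under test and
# to a plain dict.  A dict-only demo of that pattern, mirroring _run_operation's (result, error) pair:
from operator import delitem, getitem, setitem

demo_operations = [(setitem, "key_a", "val_a"), (getitem, "key_a"), (delitem, "key_a")]
demo_dict = {}
for fun, *args in demo_operations:
    try:
        result, error = fun(demo_dict, *args), None
    except Exception as exc:
        result, error = None, exc
print(demo_dict)  # -> {} after the final delitem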
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "swinv2"
__UpperCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: int = image_size
snake_case: Union[str, Any] = patch_size
snake_case: List[str] = num_channels
snake_case: Tuple = embed_dim
snake_case: str = depths
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = num_heads
snake_case: Optional[int] = window_size
snake_case: Any = mlp_ratio
snake_case: Optional[int] = qkv_bias
snake_case: Union[str, Any] = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: Dict = drop_path_rate
snake_case: List[str] = hidden_act
snake_case: int = use_absolute_embeddings
snake_case: Any = layer_norm_eps
snake_case: Dict = initializer_range
snake_case: List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case: Tuple = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
snake_case: Union[str, Any] = (0, 0, 0, 0) | 713 |
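# Illustrative check (not part of the original config): with the default embed_dim=96 and
# depths=[2, 2, 6, 2], the channel dimension after the last stage is 96 * 2**3 = 768, which is
# exactly what the hidden_size computation above produces.
demo_embed_dim = 96
demo_depths = [2, 2, 6, 2]
demo_hidden_size = int(demo_embed_dim * 2 ** (len(demo_depths) - 1))
assert demo_hidden_size == 768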
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __A : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : int , __A : Optional[int] ):
'''simple docstring'''
for attribute in key.split('.' ):
snake_case: List[str] = getattr(__A , __A )
if weight_type is not None:
snake_case: Optional[int] = getattr(__A , __A ).shape
else:
snake_case: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case: Optional[int] = value
elif weight_type == "weight_g":
snake_case: List[str] = value
elif weight_type == "weight_v":
snake_case: Dict = value
elif weight_type == "bias":
snake_case: Optional[Any] = value
else:
snake_case: int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A : List[Any] , __A : List[str] ):
'''simple docstring'''
snake_case: List[Any] = []
snake_case: List[Any] = fairseq_model.state_dict()
snake_case: Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case: Dict = None
for name, value in fairseq_dict.items():
snake_case: Tuple = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
snake_case: List[Any] = True
elif name.split('.' )[0] == "proj":
snake_case: List[Any] = fairseq_model.proj
snake_case: int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case: int = True
if "*" in mapped_key:
snake_case: List[str] = name.split(__A )[0].split('.' )[-2]
snake_case: Dict = mapped_key.replace('*' , __A )
if "weight_g" in name:
snake_case: Tuple = 'weight_g'
elif "weight_v" in name:
snake_case: int = 'weight_v'
elif "bias" in name:
snake_case: Tuple = 'bias'
elif "weight" in name:
snake_case: List[Any] = 'weight'
else:
snake_case: Any = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( __A : List[str] , __A : List[Any] , __A : int , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: int = full_name.split('conv_layers.' )[-1]
snake_case: Tuple = name.split('.' )
snake_case: Any = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case: Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case: int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case: Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case: str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case , snake_case: List[Any] = emb.weight.shape
snake_case: Optional[int] = nn.Linear(__A , __A , bias=__A )
snake_case: Any = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
with open(__A , 'r' , encoding='utf-8' ) as f:
snake_case: List[Any] = f.readlines()
snake_case: Any = [line.split(' ' )[0] for line in lines]
snake_case: int = len(__A )
snake_case: Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Any , __A : List[Any] , __A : int , __A : str , ):
'''simple docstring'''
snake_case: Union[str, Any] = WavaVecaConfig.from_pretrained(__A )
snake_case: str = SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
snake_case: List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
snake_case , snake_case , snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case: List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case: Optional[Any] = WavaVecaModel(__A )
snake_case: Any = recursively_load_weights_wavaveca(model.encoder , __A )
snake_case: Union[str, Any] = SpeechaTextaForCausalLM(__A )
snake_case , snake_case: Optional[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case: str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
snake_case: int = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
snake_case: List[Any] = False
# add projection layer
snake_case: Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case: Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case: List[Any] = create_vocab_dict(__A )
with open(os.path.join(__A , 'vocab.json' ) , 'w' ) as fp:
json.dump(__A , __A )
snake_case: Union[str, Any] = SpeechaTextaTokenizer(os.path.join(__A , 'vocab.json' ) )
tokenizer.save_pretrained(__A )
snake_case: Tuple = hf_wavavec.config.to_dict()
snake_case: int = tokenizer.pad_token_id
snake_case: Dict = tokenizer.bos_token_id
snake_case: Optional[int] = tokenizer.eos_token_id
snake_case: Dict = 'speech_to_text_2'
snake_case: Optional[Any] = 'wav2vec2'
snake_case: Tuple = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 692 | 0 |
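# Illustrative sketch (not part of the original script): create_vocab_dict above pins the four
# special tokens to ids 0-3 and then numbers the fairseq dictionary words starting at id 4; the
# same mapping applied to an in-memory word list:
demo_words = ["the", "quick", "brown"]
demo_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
demo_vocab.update(dict(zip(demo_words, range(4, len(demo_words) + 4))))
assert demo_vocab["the"] == 4 and demo_vocab["brown"] == 6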
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 714 |
'''simple docstring'''
def lowerCAmelCase_ ( __A : int = 1_00 ):
'''simple docstring'''
snake_case: List[str] = n * (n + 1) * (2 * n + 1) / 6
snake_case: List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }') | 692 | 0 |
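# Worked check (not part of the original solution): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so the difference is 2640; the closed-form expressions used
# above give the same answer.
demo_n = 10
demo_sum_of_squares = sum(i * i for i in range(1, demo_n + 1))  # 385
demo_square_of_sum = sum(range(1, demo_n + 1)) ** 2  # 3025
assert demo_square_of_sum - demo_sum_of_squares == 2640
assert int((demo_n * (demo_n + 1) / 2) ** 2 - demo_n * (demo_n + 1) * (2 * demo_n + 1) / 6) == 2640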