| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i: list, k: int, i: int, n: int) -> tuple:
    """
    Jumps ahead as many terms of the digit-sum sequence as the memoized jump
    table allows, updating the digit array a_i in place. Returns the total
    amount added and the number of terms jumped.
    """
    # a_i represents b * 10^k + c; ds_b is digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i: list, k: int, i: int, n: int) -> tuple:
    """Computes terms one at a time, updating the first k digits of a_i in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits: list, k: int, addend: int) -> None:
    """Adds addend to the digit array `digits`, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence defined by
    a(i + 1) = a(i) + digitsum(a(i)), starting from a(1) = 1.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
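    # Cross-check (a sketch added here, not part of the original solution): the
    # sequence can be recomputed by brute force for small n, assuming terms are
    # indexed from a(1) = 1 as in `solution` above.
    def brute_force(m: int) -> int:
        term = 1
        for _ in range(m - 1):
            term += sum(int(digit) for digit in str(term))
        return term

    print(solution(100) == brute_force(100))  # expected: True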
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Returns the token(s) that would advance this constraint by one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Returns whether `token_id` would make incremental progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consumes `token_id` and returns (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    """Forces an exact sequence of tokens to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    """A simple trie over the token sequences in `nested_token_ids`."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """If the leaf count differs from the word count, some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    """Fulfilled when any one of several candidate token sequences is generated."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    """Tracks a beam's progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually mutate self.constraints,
        # so the copy starts from the initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
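
def _demo_constraint_state() -> None:
    # A minimal sketch of how these classes fit together (added for illustration,
    # not part of the original module), using made-up token ids rather than the
    # output of a real tokenizer.
    constraint = PhrasalConstraint(token_ids=[5, 9, 3])
    state = ConstraintListState([constraint])
    for token in [5, 9, 3]:
        state.add(token)
    assert state.completed  # the full phrase was consumed in order
    print(state.get_bank())  # bank value used to rank beams by constraint progress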
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
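
def _demo_blip_processor() -> None:
    # A usage sketch (added for illustration). The checkpoint name is one real
    # BLIP checkpoint but any that ships this processor should behave the same;
    # "photo.jpg" is a placeholder path.
    from PIL import Image
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
    print(list(inputs.keys()))  # typically: pixel_values, input_ids, attention_mask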
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that, per column vector, only the highest-probability classes whose
        cumulative probability stays below `truncation_rate` are kept. The rest are set to log(0) = -inf.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
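
def _demo_truncate(rate: float = 0.8) -> torch.Tensor:
    # Standalone toy version of the `truncate` step above (added for
    # illustration): keep only the highest-probability classes whose cumulative
    # mass stays below `rate`, always retaining the argmax. Shapes mirror the
    # pipeline's (batch, classes, pixels) log-probabilities.
    log_p = torch.log_softmax(torch.randn(1, 5, 2), dim=1)
    sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < rate
    keep = torch.cat((torch.ones_like(keep[:, :1, :]), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, indices.argsort(1))  # undo the sort
    out = log_p.clone()
    out[~keep] = float("-inf")
    return out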
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """
    Repeats the key cyclically until it matches the message length.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """
    Encrypts the message with the generated key; spaces are passed through.

    >>> cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE")
    'BDC PAYUWL JPAIYI'
    """
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """
    Decrypts the cipher text back to the original message.

    >>> original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE")
    'THE GERMAN ATTACK'
    """
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
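    # Roundtrip sanity check (an added illustration): decrypting the ciphertext
    # must restore the original message, whatever the key.
    key_check = generate_key("HELLO WORLD", "KEY")
    assert original_text(cipher_text("HELLO WORLD", key_check), key_check) == "HELLO WORLD"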
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1_600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1_600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Finds where `function` becomes 0 in [a, b] using the bisection method,
    which requires a sign change on the interval (Bolzano's theorem).
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision of 10^-7 is reached
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
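    # Any continuous function with a sign change on [a, b] works; for example,
    # the positive root of x^2 - 2 (an added illustration):
    print(bisection(lambda x: x * x - 2, 1, 2))  # ~1.4142135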
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals (minimal form or not) to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generates the minimal roman numeral string for a given integer."""
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    For each numeral in the input file, counts the characters saved by
    rewriting it in minimal form, and returns the total savings.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
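    # Quick roundtrip of the two helpers, independent of the input file
    # (an added illustration):
    assert parse_roman_numerals("XXXXVIIII") == 49
    assert generate_roman_numerals(49) == "XLIX"  # nine characters shrink to four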
from math import isqrt


def is_prime(number: int) -> bool:
    """Returns True if `number` has no divisor between 2 and its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Counts the primes below `max_prime` that can be written as the
    difference of two consecutive cubes, (n + 1)^3 - n^3.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
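    # The step `prime_candidate += 6 * cube_index` follows from the algebra of
    # consecutive cube gaps: with d(n) = (n + 1)**3 - n**3 = 3*n*n + 3*n + 1,
    # the gaps themselves grow by d(n + 1) - d(n) = 6 * (n + 1). A quick check:
    for n in range(1, 5):
        assert (n + 1) ** 3 - n**3 == 3 * n * n + 3 * n + 1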
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
        layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1,
        position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
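
def _demo_config() -> BertGenerationConfig:
    # Minimal usage sketch (added for illustration): any field can be
    # overridden via keyword arguments; unspecified ones keep the defaults.
    config = BertGenerationConfig(num_hidden_layers=6)
    assert config.hidden_size == 1_024 and config.num_hidden_layers == 6
    return config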
def binary_xor(a: int, b: int) -> str:
    """
    Returns the bitwise XOR of two non-negative integers as a "0b"-prefixed
    binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
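    # For example (added illustration; 25 = 0b011001 and 32 = 0b100000):
    print(binary_xor(25, 32))  # 0b111001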
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
_UpperCAmelCase : Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
class lowercase ( _UpperCAmelCase ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
__lowercase : Union[str, Any] = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behave like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
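# For reference, BART follows the RoBERTa special-token layout, so the two
# methods above produce for a sequence pair (A, B):
#   <s> A </s> </s> B </s>
# with every token_type_id equal to 0 (BART does not use token type ids).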
| 701 |
import re
def dna_complement(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
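# Hedged usage sketch (calling the function defined above):
#   >>> dna_complement("ATCG")
#   'TAGC'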
| 3 | 0 |
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
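# The snippet imports Stack from a sibling module. A minimal sketch of the
# interface it relies on (push/pop/peek), assuming a plain list-backed stack;
# the real module may differ:
#
#   class Stack:
#       def __init__(self):
#           self._items = []
#       def push(self, item):
#           self._items.append(item)
#       def pop(self):
#           return self._items.pop()
#       def peek(self):
#           return self._items[-1]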
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
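    # For reference: with variance_type="fixed_small" the values checked above
    # follow the DDPM posterior variance
    #   beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
    # evaluated under the linear beta schedule from get_scheduler_config().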
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 3 | 0 |
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
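# Worked example: the smallest number expressible as a prime square plus a
# prime cube plus a prime fourth power is 28 = 2**2 + 2**3 + 2**4.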
if __name__ == "__main__":
print(F'''{solution() = }''')
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"

class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
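    # For reference, CamemBERT uses the RoBERTa-style layout, so a sequence
    # pair (A, B) is encoded as:
    #   <s> A </s> </s> B </s>
    # and create_token_type_ids_from_sequences returns all zeros.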
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 3 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
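# The pattern above keeps `import transformers` cheap: _LazyModule replaces
# this module in sys.modules and only imports a submodule on first attribute
# access. Hedged usage sketch:
#   from transformers.models.git import GitConfig  # triggers configuration_git import here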
| 3 | 0 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
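# Hedged usage sketch: the returned extension module exposes the fused ops
# consumed by the deformable attention layer; the op name below is an
# assumption based on the source files compiled above.
#   MSDA = load_cuda_kernels()
#   # e.g. MSDA.ms_deform_attn_forward(value, spatial_shapes, ...) from the model code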
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}

class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
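# For the default task, the `inputs` property above resolves to:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. batch and sequence length are the dynamic ONNX axes.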
| 3 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
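# Worked example: with triangle[2] == [1, 2, 1], populating row index 3 gives
# current_row[1] = 1 + 2 = 3 and current_row[2] = 2 + 1 = 3, i.e. [1, 3, 3, 1].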
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        # tf.sub was renamed to tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        # tf.initialize_all_variables was deprecated in favor of
        # tf.global_variables_initializer
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
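# Hedged usage sketch (synthetic data; this assumes the function name used above
# and requires a TensorFlow 1.x runtime because of the Session/placeholder API):
#   import numpy as np
#   data = np.random.rand(50, 2).astype("float64")
#   centroids, assignments = TFKMeansCluster(data, 3)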
| 3 | 0 |
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
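# Hedged usage sketch: with this conftest.py on the test path, a run such as
#   pytest --make-reports=my_run tests/
# should write the extra report files via the shared helper wired in above
# (exact flag semantics live in diffusers.utils.testing_utils).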
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1_581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1_517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1_570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1_584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1_793
    _globals["_SELFTESTDATA"]._serialized_start = 1_795
    _globals["_SELFTESTDATA"]._serialized_end = 1_916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1_864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1_905
    _globals["_MODELPROTO"]._serialized_start = 1_919
    _globals["_MODELPROTO"]._serialized_end = 2_429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2_208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2_418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2_323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2_407
# @@protoc_insertion_point(module_scope)
| 3 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
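# Example invocation (script and file names are placeholders; the flags come
# from the parser defined above):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors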
| 708 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subsplit(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 3 | 0 |
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
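# Hedged doctest-style example for the function above:
#   >>> mean([3, 6, 9, 12, 15, 18, 21])
#   12.0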
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
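# A minimal usage sketch of the processor exercised above (illustrative only; it assumes
# Pillow is available, and the size dict simply mirrors the tester defaults):
#
#     processor = DonutImageProcessor(size={"height": 18, "width": 20})
#     pixel_values = processor(Image.new("RGB", (30, 40)), return_tensors="pt").pixel_values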
| 3 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
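# Quick illustration of rename_key on a plain dict (not part of the conversion itself):
#
#     d = {"old": 1}
#     rename_key(d, "old", "new")
#     assert d == {"new": 1}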
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)
    print('Logits:', outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f'nielsr/{model_name}')
        image_processor.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
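# Example invocation (the script filename and paths below are illustrative placeholders):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade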
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
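# Quick sanity check of get_pairs (illustrative only):
#
#     get_pairs(("h", "e", "l", "l", "o"))
#     # {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}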
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub('(\')', r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
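# Illustrative usage (requires real vocab.json / merges.txt files on disk; the file
# names here are placeholders):
#
#     tokenizer = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#     tokenizer.tokenize("sample text")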
| 3 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
POKER_HANDS = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('hand, other, expected', POKER_HANDS)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
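# Illustrative use of the API exercised above (the expected result is taken straight
# from the first row of POKER_HANDS):
#
#     PokerHand("2H 3H 4H 5H 6H").compare_with(PokerHand("KS AS TS QS JS"))  # "Loss"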
| 712 |
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent as a string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Take an integer value (as a string) and return its binary equivalent, raising ValueError on invalid input."""
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f'{negative}0b{binary_recursive(int(number))}'
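# Illustrative calls (doctest-style, assuming the function names used above):
#
#     main("10")   # '0b1010'
#     main("-5")   # '-0b101'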
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = ''.join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(' ', '<SP>')
        text = text.replace('\u3000', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result
def __UpperCamelCase ( self , A_ , A_="\n" ) -> Any:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase_ ) > 0:
words.append(bytearray(UpperCamelCase_ ).decode('utf-8' , errors='replace' ) )
UpperCamelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(UpperCamelCase_ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
words.append(bytearray(UpperCamelCase_ ).decode('utf-8' , errors='replace' ) )
UpperCamelCase = ''.join(UpperCamelCase_ )
return text
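# Illustrative usage (network access assumed; the checkpoint is the one referenced in
# the pretrained maps above):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     tokenizer.tokenize("こんにちは")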
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group')
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act='swish')
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = 'rotary'
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
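# Example invocation (the script filename and paths below are illustrative placeholders):
#
#     python convert_wav2vec2_conformer_checkpoint.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --pytorch_dump_folder_path ./wav2vec2-conformer \
#         --not_finetuned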
| 3 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
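

# A minimal standalone sketch of the classification path exercised by the integration
# test above. The checkpoint name is the one the tests already use; the helper name and
# the local image path are our own illustrative choices, and the first run needs
# network access to fetch the weights.
def classify_image(image_path: str) -> str:
    image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
    model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
    image = Image.open(image_path)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(-1))]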
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 3 | 0 |
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        # repeated squaring quickly overshoots `a`, giving a safe starting point
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9_999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
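

# Quick sanity check of the Newton-Raphson routine above against math.sqrt.
# The helper name is ours; the 1e-7 window is far looser than the solver's own
# stopping tolerance, so these assertions should hold comfortably.
def _check_square_root() -> None:
    for target in (2.0, 10.0, 1_000.0):
        approx = square_root_iterative(target)
        assert abs(approx - math.sqrt(target)) < 1e-7, (target, approx)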
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : str = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = 1
__lowercase : int = 2
__lowercase : List[Any] = 3
__lowercase : str = 4
__lowercase : Optional[Any] = 5
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : jnp.ndarray
class lowercase :
__lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME
__lowercase : Dict = ["dtype"]
__lowercase : List[Any] = []
__lowercase : Dict = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def A ( lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
assert len(lowercase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase )
def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray:
'''simple docstring'''
def alpha_bar(lowercase ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
UpperCamelCase = []
for i in range(lowercase ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) )
return jnp.array(lowercase , dtype=lowercase )
@flax.struct.dataclass
class lowercase :
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = state.alphas_cumprod
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
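

# Illustrative use of the helpers above without a real scheduler object: build a short
# cosine beta schedule, assemble a CommonSchedulerState by hand, and noise a dummy
# batch at a fixed timestep. The helper name and shapes are our own choices; the
# broadcasting follows get_sqrt_alpha_prod's left-to-right convention.
def _demo_add_noise() -> jnp.ndarray:
    betas = betas_for_alpha_bar(10)
    alphas = 1.0 - betas
    state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
    sample = jnp.ones((2, 3, 4, 4))
    noise = jnp.zeros_like(sample)
    timesteps = jnp.array([5, 5])
    return add_noise_common(state, sample, noise, timesteps)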
| 3 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
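

# A hedged end-to-end sketch: this class is normally reached through the
# `transformers.pipeline` factory rather than instantiated directly. The model name
# below is just one example of a masked-LM checkpoint that fits this pipeline.
if __name__ == "__main__":
    from transformers import pipeline

    fill_masker = pipeline("fill-mask", model="distilroberta-base")
    for prediction in fill_masker("Paris is the <mask> of France.", top_k=3):
        # each dict carries 'score', 'token', 'token_str' and 'sequence',
        # the keys assembled in postprocess() above
        print(prediction["token_str"], prediction["score"])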
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint

class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}

                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count

class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint

class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
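

# A small walk-through of the state machine above: feed a phrase token-by-token and
# watch a PhrasalConstraint reach completion. The token ids are arbitrary
# illustrations, not real vocabulary ids, and the helper name is ours.
def _demo_phrasal_constraint() -> None:
    constraint = PhrasalConstraint([5, 9, 2])
    completed = False
    for token in (5, 9, 2):
        stepped, completed, reset = constraint.update(token)
        assert stepped and not reset
    assert completed and constraint.remaining() == 0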
| 3 | 0 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Walk pairs of consecutive primes and accumulate the qualifying numbers
    between their squares, up to the given limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
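

# Sanity check for the sieve helper above; solution() itself walks consecutive
# prime squares up to ~1e12, so it is left to the __main__ guard.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]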
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) the low-probability tail of each column so that the kept
        probabilities sum to at most `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
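

# A hedged usage sketch: the pipeline above is normally loaded from a pretrained
# checkpoint. "microsoft/vq-diffusion-ithq" is assumed here as the checkpoint this
# class is commonly paired with; sampling realistically needs a GPU.
if __name__ == "__main__":
    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
    image.save("teddy_bear.png")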
| 3 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit of length k (111...1) is divisible by
    divisor, or 0 when no repunit can be (divisor shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor whose smallest divisible repunit is longer than
    `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
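

# Worked example: 111111 = 7 * 15873 and no shorter repunit is divisible by 7,
# so the helper above should report an index of 6.
assert least_divisible_repunit(7) == 6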
| 718 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cycle the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message; spaces are passed through unchanged."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the ciphertext produced by cipher_text()."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
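

# Round-trip check: decrypting with the same running key must recover the plaintext
# (spaces are passed through and do not consume key letters). The helper name is ours.
def _round_trip(message: str, key: str) -> bool:
    key_new = generate_key(message, key)
    return original_text(cipher_text(message, key_new), key_new) == message


assert _round_trip("THE GERMAN ATTACK", "SECRET")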
| 3 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'''{solution() = }''')
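

# Small worked example: the primes below 10 are 2, 3, 5 and 7, so solution(10)
# should return 17. The default argument reproduces the two-million case.
assert solution(10) == 17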
| 719 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
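

# f(x) = x**3 - 2*x - 5 changes sign on [1, 1000] and its real root is near
# 2.09455148, so the bisection result should agree to well within 1e-6.
assert abs(bisection(f, 1, 1_000) - 2.0945514815) < 1e-6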
| 3 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace/control characters the bpe code barfs on."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
for token in re.findall(self.pat , snake_case_ ):
UpperCamelCase = "".join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(' ' ) )
return bpe_tokens
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(snake_case_ )
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = "".join(snake_case_ )
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
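
# A minimal smoke test for the byte-level helpers above; it exercises only the
# module-level functions, so no vocab/merges files are needed.
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256 and byte_encoder[ord("A")] == "A"
    mapped = "".join(byte_encoder[b] for b in "hug".encode("utf-8"))
    print(get_pairs(tuple(mapped)))  # {('h', 'u'), ('u', 'g')} -> candidate BPE merges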
| 720 |
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """
    Converts a string of roman numerals (including non-minimal forms) to an integer.

    >>> parse_roman_numerals("LXXXIX")
    89
    >>> parse_roman_numerals("IIII")
    4
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """
    Generates the minimal roman numeral string for a given integer.

    >>> generate_roman_numerals(89)
    'LXXXIX'
    >>> generate_roman_numerals(4)
    'IV'
    """
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    Returns the total number of characters saved by rewriting each roman numeral
    in the input file in its minimal form.
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
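
# Worked example from the Project Euler 89 statement: 'MCCCCCCVI' (9 characters)
# reads as 1606, and its minimal form 'MDCVI' (5 characters) saves 4 characters.
assert parse_roman_numerals("MCCCCCCVI") == 1_606
assert generate_roman_numerals(1_606) == "MDCVI"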
| 3 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32_767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32_767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> "FeatureType":
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
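
# Minimal usage sketch (not part of the datasets module; assumes soundfile and
# librosa are installed). The waveform is a made-up one-second silence at 16 kHz.
if __name__ == "__main__":
    feature = Audio(sampling_rate=16_000)
    encoded = feature.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
    decoded = feature.decode_example(encoded)
    print(decoded["sampling_rate"], decoded["array"].shape)  # 16000 (16000,)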
| 721 |
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
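
# The rule under test, restated as a standalone predicate. This is a sketch for
# readability, not the library implementation.
def fits_in_memory(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)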
| 3 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
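
# Minimal usage sketch (not part of the transformers module): build a default
# config and inspect the dynamic axes the ONNX config advertises for export.
if __name__ == "__main__":
    config = BertConfig()
    onnx_config = BertOnnxConfig(config)
    print(config.hidden_size, list(onnx_config.inputs.keys()))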
| 700 |
def binary_xor(a: int, b: int) -> str:
    """
    Returns the binary XOR of two non-negative integers as a '0b'-prefixed string.

    >>> binary_xor(25, 32)
    '0b111001'
    >>> binary_xor(0, 1)
    '0b1'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
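
# Worked example: 25 = 0b011001 and 32 = 0b100000, so the bitwise XOR is
# 0b111001 (decimal 57).
assert binary_xor(25, 32) == "0b111001"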
| 3 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = BioGptModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
UpperCamelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = BioGptForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , *A_ , A_=False ) -> Tuple:
"""simple docstring"""
UpperCamelCase = BioGptForCausalLM(__lowerCamelCase )
model.to(__lowerCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCamelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __UpperCamelCase ( self , A_ , *A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = BioGptModel(__lowerCamelCase )
UpperCamelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , *A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = BioGptForTokenClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__lowerCamelCase )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__lowerCamelCase , gradient_checkpointing=__lowerCamelCase )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__lowerCamelCase )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__lowerCamelCase )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__lowerCamelCase )
@slow
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__lowerCamelCase )
UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
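        # Why left padding: a decoder-only LM conditions each step on the previous
        # position, so pads must sit on the left to keep every prompt's last real
        # token adjacent to the first generated position.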
# use different length sentences to test batching
UpperCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCamelCase = tokenizer(__lowerCamelCase , return_tensors='pt' , padding=__lowerCamelCase )
UpperCamelCase = inputs['''input_ids'''].to(__lowerCamelCase )
UpperCamelCase = model.generate(
input_ids=__lowerCamelCase , attention_mask=inputs['attention_mask'].to(__lowerCamelCase ) , )
UpperCamelCase = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(__lowerCamelCase )
UpperCamelCase = model.generate(input_ids=__lowerCamelCase )
UpperCamelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCamelCase = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(__lowerCamelCase )
UpperCamelCase = model.generate(input_ids=__lowerCamelCase , max_length=model.config.max_length - num_paddings )
UpperCamelCase = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BioGptModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = input_dict['''input_ids''']
UpperCamelCase = input_ids.ne(1 ).to(__lowerCamelCase )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = BioGptForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = '''multi_label_classification'''
UpperCamelCase = input_dict['''input_ids''']
UpperCamelCase = input_ids.ne(1 ).to(__lowerCamelCase )
UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase = BioGptForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_lm_head_model(self):
UpperCamelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
UpperCamelCase = torch.tensor([[2, 4_805, 9, 656, 21]] )
UpperCamelCase = model(__lowerCamelCase )[0]
UpperCamelCase = 42_384
UpperCamelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __lowerCamelCase )
UpperCamelCase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
    def test_biogpt_generation(self):
UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
UpperCamelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__lowerCamelCase )
torch.manual_seed(0 )
UpperCamelCase = tokenizer('COVID-19 is' , return_tensors='pt' ).to(__lowerCamelCase )
UpperCamelCase = model.generate(
**__lowerCamelCase , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=__lowerCamelCase , )
UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 701 |
import re


def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence (A<->T, C<->G).

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
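
# Quick example: each base is swapped with its Watson-Crick complement.
assert dna("GTCA") == "CAGT"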
| 3 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling window size
        :param bp_num1/2/3: units in the input, hidden and output BP layers
        (rate_w / rate_t are the weight and threshold learning rates)
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
UpperCamelCase = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
        with open(save_path, 'wb') as f:
            pickle.dump(model_dic, f)
        print(F'''Model saved: {save_path}''')
    @classmethod
    def ReadModel(cls, model_path):
        # read saved model
        with open(model_path, 'rb') as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get('conv1')
        conv_get.append(model_dic.get('step_conv1'))
        size_p1 = model_dic.get('size_pooling1')
        bp1 = model_dic.get('num_bp1')
        bp2 = model_dic.get('num_bp2')
        bp3 = model_dic.get('num_bp3')
        r_w = model_dic.get('rate_weight')
        r_t = model_dic.get('rate_thre')
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get('w_conv1')
        conv_ins.wkj = model_dic.get('wkj')
        conv_ins.vji = model_dic.get('vji')
        conv_ins.thre_conv1 = model_dic.get('thre_conv1')
        conv_ins.thre_bp2 = model_dic.get('thre_bp2')
        conv_ins.thre_bp3 = model_dic.get('thre_bp3')
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
"""simple docstring"""
# convolution process
UpperCamelCase = convs[0]
UpperCamelCase = convs[1]
UpperCamelCase = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
UpperCamelCase = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
UpperCamelCase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCamelCase = []
UpperCamelCase = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
UpperCamelCase = []
for i_focus in range(len(_lowercase ) ):
UpperCamelCase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
UpperCamelCase = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expanding the data slice to One dimenssion
UpperCamelCase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
UpperCamelCase = np.asarray(_lowercase )
return focus_list, data_featuremap
def __UpperCamelCase ( self , A_ , A_ , A_="average_pool" ) -> int:
"""simple docstring"""
# pooling process
UpperCamelCase = len(featuremaps[0] )
UpperCamelCase = int(size_map / size_pooling )
UpperCamelCase = []
for i_map in range(len(_lowercase ) ):
UpperCamelCase = featuremaps[i_map]
UpperCamelCase = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
UpperCamelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
UpperCamelCase = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
# expanding three dimension data to one dimension list
UpperCamelCase = []
for i in range(len(_lowercase ) ):
UpperCamelCase = np.shape(data[i] )
UpperCamelCase = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCamelCase = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
UpperCamelCase = np.asarray(_lowercase )
return data_expanded
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
# expanding matrix to one dimension list
UpperCamelCase = np.asarray(_lowercase )
UpperCamelCase = np.shape(_lowercase )
UpperCamelCase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = 0
for i_map in range(_lowercase ):
UpperCamelCase = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
UpperCamelCase = pd_pool[
i_pool
]
UpperCamelCase = i_pool + 1
UpperCamelCase = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_=bool ) -> Tuple:
"""simple docstring"""
# model traning
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(_lowercase )) )
print((' - - Shape: Teach_Data ', np.shape(_lowercase )) )
UpperCamelCase = 0
UpperCamelCase = []
UpperCamelCase = 10_000
while rp < n_repeat and mse >= error_accuracy:
UpperCamelCase = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCamelCase = np.asmatrix(datas_train[p] )
UpperCamelCase = np.asarray(datas_teach[p] )
UpperCamelCase , UpperCamelCase = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase = self.pooling(_lowercase , self.size_poolinga )
UpperCamelCase = np.shape(_lowercase )
UpperCamelCase = self._expand(_lowercase )
UpperCamelCase = data_bp_input
UpperCamelCase = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
UpperCamelCase = self.sig(_lowercase )
UpperCamelCase = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
UpperCamelCase = self.sig(_lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCamelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
UpperCamelCase = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
UpperCamelCase = np.dot(_lowercase , self.vji )
UpperCamelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCamelCase = pd_conva_pooled.T.getA().tolist()
UpperCamelCase = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCamelCase = self._expand_mat(pd_conva_all[k_conv] )
UpperCamelCase = self.rate_weight * np.dot(_lowercase , _lowercase )
UpperCamelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCamelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCamelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCamelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCamelCase = self.thre_bpa - pd_k_all * self.rate_thre
UpperCamelCase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCamelCase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCamelCase = rp + 1
UpperCamelCase = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
UpperCamelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '+-' )
plt.plot(_lowercase , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('------------------Training Complished---------------------' )
print((' - - Training epoch: ', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
# model predict
UpperCamelCase = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
UpperCamelCase = np.asmatrix(datas_test[p] )
UpperCamelCase , UpperCamelCase = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase = self.pooling(_lowercase , self.size_poolinga )
UpperCamelCase = self._expand(_lowercase )
UpperCamelCase = data_bp_input
UpperCamelCase = bp_outa * self.vji.T - self.thre_bpa
UpperCamelCase = self.sig(_lowercase )
UpperCamelCase = bp_outa * self.wkj.T - self.thre_bpa
UpperCamelCase = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
UpperCamelCase = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
# return the data of image after convoluting process so we can check it out
UpperCamelCase = np.asmatrix(_lowercase )
UpperCamelCase , UpperCamelCase = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
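
    # Shape walk-through of the forward path (hypothetical sizes): with
    # conv1_get=[3, 2, 1] and size_p1=2 on a 10x10 input, convolute yields two
    # 8x8 feature maps ((10 - 3) / 1 + 1 = 8), pooling halves them to two 4x4
    # maps, and flattening gives 2 * 4 * 4 = 32 inputs for the BP layers.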
if __name__ == "__main__":
    pass
| 702 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
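
    # The reference values above follow from the linear beta schedule with
    # variance_type="fixed_small": the posterior variance
    # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) is 0 at t = 0 and
    # approaches beta_end = 0.02 at t = 999.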
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 3 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=2 , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 2
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = DeiTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = DeiTForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = DeiTForMaskedImageModeling(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( snake_case__ , snake_case__ , unittest.TestCase ):
__lowercase : Dict = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__lowercase : Any = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__lowercase : Optional[Any] = False
__lowercase : Optional[int] = False
__lowercase : List[str] = False
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = DeiTModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowercase_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def __UpperCamelCase ( self , A_ , A_ , A_=False ) -> Tuple:
"""simple docstring"""
UpperCamelCase = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCamelCase = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCamelCase = model(**lowercase_ ).loss
loss.backward()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCamelCase = model_class(lowercase_ )
model.gradient_checkpointing_enable()
model.to(lowercase_ )
model.train()
UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCamelCase = model(**lowercase_ ).loss
loss.backward()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_ ),
*get_values(lowercase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
UpperCamelCase = problem_type["title"]
UpperCamelCase = problem_type["num_labels"]
UpperCamelCase = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if problem_type["num_labels"] > 1:
UpperCamelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
UpperCamelCase = inputs["labels"].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_ ) as warning_list:
UpperCamelCase = model(**lowercase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = DeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def A ( ) -> Any:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
lowercase_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**lowercase_ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCamelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowercase_ , return_tensors='pt' )
UpperCamelCase = inputs.pixel_values.to(lowercase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCamelCase = model(lowercase_ )
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
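# --- Hedged usage sketch (added; not part of the original file) ---
# Assumes the public `transformers` API, where this class is exposed as
# CamembertTokenizerFast; the token ids below are illustrative only.
if __name__ == "__main__":
    from transformers import CamembertTokenizerFast

    tok = CamembertTokenizerFast.from_pretrained("camembert-base")
    # A sequence pair is wrapped as `<s> A </s></s> B </s>`, matching
    # build_inputs_with_special_tokens above.
    print(tok.build_inputs_with_special_tokens([100, 101], [200, 201]))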
| 3 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase ( _UpperCAmelCase ):
__lowercase : Dict = '''mobilenet_v1'''
def __init__( self , A_=3 , A_=224 , A_=1.0 , A_=8 , A_="relu6" , A_=True , A_=0.999 , A_=0.02 , A_=0.001 , **A_ , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**A_ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = depth_multiplier
UpperCamelCase = min_depth
UpperCamelCase = hidden_act
UpperCamelCase = tf_padding
UpperCamelCase = classifier_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
class lowercase ( _UpperCAmelCase ):
__lowercase : List[str] = version.parse("1.11" )
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def __UpperCamelCase ( self ) -> float:
"""simple docstring"""
return 1e-4
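# --- Hedged usage sketch (added; not part of the original file) ---
# Assumes the upstream name MobileNetV1Config from `transformers`, of which the
# first class above is the obfuscated counterpart.
if __name__ == "__main__":
    from transformers import MobileNetV1Config

    # depth_multiplier scales every convolution's channel count; the __init__
    # check above rejects values <= 0.
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    print(config.depth_multiplier, config.image_size, config.hidden_act)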
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
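# Hedged note (added; not part of the original file): with the _LazyModule
# pattern above, `import transformers.models.git` stays cheap, because the
# torch-backed names listed in _import_structure are only resolved on first
# attribute access, e.g. `from transformers.models.git import GitProcessor`.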
| 3 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
UpperCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
UpperCamelCase = tokenizer('Hello there' , return_tensors='np' ).input_ids
UpperCamelCase = tokenizer('Hi I am' , return_tensors='np' ).input_ids
UpperCamelCase = shift_tokens_right(A_ , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCamelCase = model(A_ , decoder_input_ids=A_ ).logits
UpperCamelCase = optax.softmax_cross_entropy(A_ , onehot(A_ , logits.shape[-1] ) ).mean()
UpperCamelCase = -(labels.shape[-1] * loss.item())
UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
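# Hedged note (added; not part of the original file): `loss` above is the mean
# per-token cross-entropy, so multiplying by labels.shape[-1] and negating
# turns it into the total log-likelihood of the target sequence, which the
# test compares against the hard-coded reference score -84.9127.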
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 3 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = ""
__lowercase : int = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__lowercase : Dict = None # compression type in fsspec. ex: "gzip"
__lowercase : Dict = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , A_ = "" , A_ = None , A_ = None , **A_ ) -> Optional[Any]:
"""simple docstring"""
super().__init__(self , **__UpperCamelCase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCamelCase = fsspec.open(
__UpperCamelCase , mode='rb' , protocol=__UpperCamelCase , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCamelCase = os.path.basename(self.file.path.split('::' )[0] )
UpperCamelCase = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
UpperCamelCase = None
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__UpperCamelCase ).lstrip('/' )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
if self.dir_cache is None:
UpperCamelCase = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
UpperCamelCase = {f['name']: f}
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
return self.file.open().read()
def __UpperCamelCase ( self , A_ , A_ = "rb" , A_=None , A_=True , A_=None , **A_ , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self._strip_protocol(__UpperCamelCase )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = "bz2"
__lowercase : Optional[Any] = "bz2"
__lowercase : Dict = ".bz2"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "gzip"
__lowercase : Optional[Any] = "gzip"
__lowercase : Optional[int] = ".gz"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : int = "lz4"
__lowercase : Optional[int] = "lz4"
__lowercase : List[Any] = ".lz4"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[int] = "xz"
__lowercase : str = "xz"
__lowercase : List[Any] = ".xz"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = "zstd"
__lowercase : Dict = "zstd"
__lowercase : Any = ".zst"
def __init__( self , A_ , A_ = "rb" , A_ = None , A_ = None , A_ = DEFAULT_BLOCK_SIZE , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(
fo=__UpperCamelCase , mode=__UpperCamelCase , target_protocol=__UpperCamelCase , target_options=__UpperCamelCase , block_size=__UpperCamelCase , **__UpperCamelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase = self.file.__enter__
class lowercase :
def __init__( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = file_
def __enter__( self ) -> Any:
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
self._file.__exit__(*__UpperCamelCase , **__UpperCamelCase )
def __iter__( self ) -> Union[str, Any]:
"""simple docstring"""
return iter(self._file )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return next(self._file )
def __getattr__( self , A_ ) -> Tuple:
"""simple docstring"""
return getattr(self._file , __UpperCamelCase )
def fixed_enter(*A_ , **A_ ):
return WrappedFile(_enter(*__UpperCamelCase , **__UpperCamelCase ) )
UpperCamelCase = fixed_enter
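# --- Hedged usage sketch (added; not part of the original file) ---
# Assumes these filesystems are registered with fsspec under their `protocol`
# strings, which upstream `datasets` does at import time; the archive path is
# made up.
if __name__ == "__main__":
    import datasets  # noqa: F401  (importing registers the compression protocols)
    import fsspec

    # Chained URL, as in the class comment above: read `file.txt` out of
    # `file.txt.gz` through the "gzip" protocol.
    with fsspec.open("gzip://file.txt::./file.txt.gz", "rb") as f:
        print(f.read()[:80])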
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
UpperCamelCase = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase = tf.placeholder('float64' , [dim] )
UpperCamelCase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase = tf.placeholder('int32' )
UpperCamelCase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
##Now let's construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase = tf.placeholder('float' , [noofclusters] )
UpperCamelCase = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
UpperCamelCase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
UpperCamelCase = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase = sess.run(lowercase )
UpperCamelCase = sess.run(lowercase )
return centroids, assignments
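# --- Hedged usage sketch (added; not part of the original file) ---
# The clustering routine above keeps its obfuscated name `A`, and it depends on
# the long-deprecated TF1 tf.Session/tf.placeholder/tf.sub API, so this only
# runs under an old TensorFlow 1.x install.
if __name__ == "__main__":
    sample_vectors = [
        array([1.0, 1.0]),
        array([1.5, 2.0]),
        array([8.0, 8.0]),
        array([9.0, 9.5]),
    ]
    centroids, assignments = A(sample_vectors, 2)
    print(centroids)  # two cluster centers
    print(assignments)  # cluster index for each input vector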
| 3 | 0 |
import math
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = 0
while num > 0:
UpperCamelCase = num % 8
UpperCamelCase = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
UpperCamelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f'''0o{int(octal )}'''
def A ( ) -> None:
'''simple docstring'''
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_UpperCAmelCase : Tuple = _symbol_database.Default()
_UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_UpperCAmelCase : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase : Optional[Any] = 45
_UpperCAmelCase : Any = 1_581
_UpperCAmelCase : Tuple = 1_517
_UpperCAmelCase : List[str] = 1_570
_UpperCAmelCase : int = 1_584
_UpperCAmelCase : List[Any] = 1_793
_UpperCAmelCase : Optional[int] = 1_795
_UpperCAmelCase : Any = 1_916
_UpperCAmelCase : Tuple = 1_864
_UpperCAmelCase : List[Any] = 1_905
_UpperCAmelCase : Union[str, Any] = 1_919
_UpperCAmelCase : str = 2_429
_UpperCAmelCase : Any = 2_208
_UpperCAmelCase : Dict = 2_418
_UpperCAmelCase : Optional[Any] = 2_323
_UpperCAmelCase : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 3 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase ( __A ):
__lowercase : int = """unispeech-sat"""
def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="group" , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=128 , A_=16 , A_=False , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_=320 , A_=2 , A_=0.1 , A_=100 , A_=256 , A_=256 , A_=0.1 , A_="mean" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1_500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=504 , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim )
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = xvector_output_dim
@property
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
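# Worked example (added; not part of the original file) for the property above:
# with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the cumulative product is
# 5 * 2**6 = 320, i.e. the feature encoder emits one frame per 320 input
# samples (~20 ms of audio at 16 kHz).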
| 708 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase = Trie()
UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
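# Hedged note (added; not part of the original file): Trie.split performs
# longest-match segmentation, which is why adding both "extra_id_1" and
# "extra_id_100" in the test above makes the splitter pick "extra_id_100".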
| 3 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=int , default=4 , help='How many images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
UpperCamelCase = parser.parse_args()
return args
def A ( lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
if not len(lowercase ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
UpperCamelCase , UpperCamelCase = imgs[0].size
UpperCamelCase = Image.new('RGB' , size=(cols * w, rows * h) )
UpperCamelCase , UpperCamelCase = grid.size
for i, img in enumerate(lowercase ):
grid.paste(lowercase , box=(i % cols * w, i // cols * h) )
return grid
def A ( lowercase , lowercase="robotic cat with wings" , lowercase=7.5 , lowercase=50 , lowercase=1 , lowercase=42 , ) -> Any:
'''simple docstring'''
UpperCamelCase = torch.Generator(pipeline.device ).manual_seed(lowercase )
UpperCamelCase = pipeline(
lowercase , guidance_scale=lowercase , num_inference_steps=lowercase , generator=lowercase , num_images_per_prompt=lowercase , ).images
UpperCamelCase = int(math.sqrt(lowercase ) )
UpperCamelCase = image_grid(lowercase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
_UpperCAmelCase : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
_UpperCAmelCase : Any = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
_UpperCAmelCase : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
_UpperCAmelCase : Any = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
_UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
_UpperCAmelCase : Tuple = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
_UpperCAmelCase : str = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
_UpperCAmelCase : Dict = unet.to(torch.device("cuda", args.cuda_id))
_UpperCAmelCase : Any = pipeline.to(unet.device)
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
_UpperCAmelCase : Optional[Any] = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
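# Hedged CLI sketch (added; not part of the original file); the flags match the
# parser defined above, while the script and model paths are hypothetical:
#   python generate_images.py -m ./stable-diffusion-ckpt \
#       -c "robotic cat with wings" -n 4 -s 42 -ci 0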
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = 'train'
        path = {'train': parquet_path, 'test': parquet_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / 'foo.parquet')
    output_table = pf.read()
    assert dataset.data.table == output_table
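# Round-trip note (descriptive): `writer.write()` returns the number of bytes
# written, and `pq.ParquetFile(...).read()` yields a `pyarrow.Table`, so the
# assertion above compares the dataset's underlying Arrow table with the file
# contents at the Arrow level.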
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / 'test_image_rgb.jpg')
    data = {'image': [image_path]}
    features = Features({'image': Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / 'foo.parquet'))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / 'foo.parquet'), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    'feature, expected', [
        (Features({'foo': Value('int32')}), None),
        (Features({'image': Image(), 'foo': Value('int32')}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'nested': Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ], )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 3 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowercase ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) -> Tuple:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = size if size is not None else {'shortest_edge': 256}
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase = get_size_dict(UpperCamelCase__ , param_name='crop_size' )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase = get_resize_output_image_size(UpperCamelCase__ , size=size['shortest_edge'] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __UpperCamelCase ( self , A_ , A_ , A_ = None , **A_ , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __UpperCamelCase ( self , A_ , A_ , A_ = None , **A_ ) -> Optional[int]:
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> List[str]:
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __UpperCamelCase ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(UpperCamelCase__ , param_name='crop_size' )
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __UpperCamelCase ( self , A_ , A_ = None ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(UpperCamelCase__ ):
UpperCamelCase = target_sizes.numpy()
UpperCamelCase = []
for idx in range(len(UpperCamelCase__ ) ):
UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCamelCase__ )
UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
UpperCamelCase = logits.argmax(dim=1 )
UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json')
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='test-feature-extractor', push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor')
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-feature-extractor-org', push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'}, )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor', trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
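# Example (verifiable by hand): for a word already split into symbols with the
# end-of-word marker folded into the last symbol,
#   get_pairs(('l', 'o', 'w</w>')) -> {('l', 'o'), ('o', 'w</w>')}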
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs, ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text) -> List[str]:
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
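# Minimal usage sketch (assumes local vocab.json / merges.txt files exist):
#   tokenizer = BlenderbotSmallTokenizer('vocab.json', 'merges.txt')
#   ids = tokenizer('sample text')['input_ids']
#   text = tokenizer.decode(ids)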
| 3 | 0 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down memoised edit (Levenshtein) distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)
    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )
    return min_distance(0, 0)
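# Worked examples (classic Levenshtein distances, checkable by hand):
#   min_distance_up_bottom('intention', 'execution') -> 5
#   min_distance_up_bottom('intention', '') -> 9
#   min_distance_up_bottom('', '') -> 0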
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digit string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Convert an integer string (optionally negative) to a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f'{negative}0b{binary_recursive(int(number))}'
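# Examples (illustrative, matching the functions above):
#   binary_recursive(10) -> '1010'
#   main('-10')          -> '-0b1010'
#   main(' 8 ')          -> '0b1000'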
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_A )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(_A )
UpperCamelCase = model(**self._prepare_for_class(_A , _A ) , training=_A )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase = layer_type
UpperCamelCase = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(_A , _A , _A )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(A_ , A_ , A_ , A_={} ):
UpperCamelCase = model(_A , return_dict=_A , **_A )
UpperCamelCase = model(_A , return_dict=_A , **_A ).to_tuple()
def recursive_check(A_ , A_ ):
if isinstance(_A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_A , _A ):
recursive_check(_A , _A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_A , _A ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(_A , _A )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_A )
UpperCamelCase = self._prepare_for_class(_A , _A )
UpperCamelCase = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A )
UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A )
UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A )
UpperCamelCase = self._prepare_for_class(_A , _A )
UpperCamelCase = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A , {'output_hidden_states': True} )
UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A )
UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A , {'output_hidden_states': True} )
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
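# Illustrative walk-through (hypothetical key): for key
# 'encoder.layers.0.self_attn.linear_q' and weight_type 'weight', the loop
# getattr-chains hf_pointer -> encoder -> layers -> 0 -> self_attn -> linear_q
# (getattr with a digit string resolves ModuleList entries), checks that the
# checkpoint tensor's shape matches, then assigns `value` into `.weight.data`.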
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act='swish')
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = 'rotary'
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 3 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
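# Example (assuming numpy arrays):
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))
#   -> 2 correct out of 3 -> 0.666...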
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
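# Example: perfectly (linearly and monotonically) related inputs score 1.0 on both,
# e.g. pearson_and_spearman([0., 1., 2.], [0., 2., 4.]) -> {'pearson': 1.0, 'spearmanr': 1.0}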
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '▁eloquent')
        self.assertEqual(len(vocab_keys), 30_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁this', '▁is', '▁a', '▁test'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
UpperCamelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """simple docstring"""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state
    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """simple docstring"""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        """simple docstring"""
        return self._get_compatibles()
    @classmethod
    def _get_compatibles(cls):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    '''simple docstring'''
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    '''simple docstring'''
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
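# Illustrative check (a sketch, not part of the original module): for this cosine
# schedule the betas start tiny and grow toward the `max_beta` cap of 0.999.
#   betas = betas_for_alpha_bar(1_000)
#   betas.shape            # (1000,)
#   bool(betas[0] < betas[-1])   # True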
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """simple docstring"""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )

        alphas = 1.0 - betas

        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
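# Minimal usage sketch (assumes a Flax scheduler instance whose state has been
# created; the array arguments and their shapes are illustrative):
#   common = CommonSchedulerState.create(scheduler)
#   noisy = add_noise_common(common, original_samples, noise, timesteps)
#   velocity = get_velocity_common(common, original_samples, noise, timesteps)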
| 3 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''simple docstring'''
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'],
        num_layers=predefined_args['num_layers'],
        units=predefined_args['units'],
        hidden_size=predefined_args['hidden_size'],
        max_length=predefined_args['max_length'],
        num_heads=predefined_args['num_heads'],
        scaled=predefined_args['scaled'],
        dropout=predefined_args['dropout'],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args['use_residual'],
        activation=predefined_args.get('activation', 'gelu'),
        layer_norm_eps=predefined_args.get('layer_norm_eps', None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), 'models')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args['units'],
        embed_size=predefined_args['embed_size'],
        embed_dropout=predefined_args['embed_dropout'],
        word_embed=predefined_args['word_embed'],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args['token_type_vocab_size'],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.0_2,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma')

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print('✔️ Both models output the same tensors')
    else:
        print('❌ Both models do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCAmelCase : List[Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
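# Illustrative invocation (the file paths are placeholders, not real files):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch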
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        """simple docstring"""
        # test for the above condition
        self.test()

    def test(self):
        """simple docstring"""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception('update() does not fulfill the constraint.')

        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')

    @abstractmethod
    def advance(self):
        """simple docstring"""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance(self, token_id):
        """simple docstring"""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update(self, token_id):
        """simple docstring"""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset(self):
        """simple docstring"""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining(self):
        """simple docstring"""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy(self, stateful=False):
        """simple docstring"""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        """simple docstring"""
        super(PhrasalConstraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """simple docstring"""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        """simple docstring"""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        """simple docstring"""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        """simple docstring"""
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        """simple docstring"""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                F''' {nested_token_ids}.''' )

        self.trie = root

    def next_tokens(self, current_seq):
        """simple docstring"""
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        """simple docstring"""
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        """simple docstring"""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """simple docstring"""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        """simple docstring"""
        super(DisjunctiveConstraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """simple docstring"""
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        """simple docstring"""
        self.completed = False
        self.current_seq = []

    def remaining(self):
        """simple docstring"""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        """simple docstring"""
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    def __init__(self, constraints):
        """simple docstring"""
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        """simple docstring"""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """simple docstring"""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """simple docstring"""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """simple docstring"""
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """simple docstring"""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state

            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.

                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.')

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        """simple docstring"""
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
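# A minimal, illustrative self-check (the token ids are made up): with branches
# [1, 2, 3] and [1, 2, 4], the tokens reachable after the prefix [1, 2] are 3 and 4,
# and consuming a full branch reaches a leaf.
if __name__ == "__main__":
    demo_trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
    print(demo_trie.next_tokens([1, 2]))  # [3, 4]
    print(demo_trie.reached_leaf([1, 2, 3]))  # True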
| 3 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
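# Illustrative usage (the overrides are arbitrary; the defaults are those shown in
# the signature above):
if __name__ == "__main__":
    config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)
    print(config.hidden_size, config.num_hidden_layers)  # 384 6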
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size=None, length=None):
        """simple docstring"""
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        """simple docstring"""
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """simple docstring"""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding='max_length',
            max_length=self.tokenizer.model_max_length,
            return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding='max_length',
                    max_length=max_length,
                    truncation=True,
                    return_tensors='pt',
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''' )

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps)}.''' )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """simple docstring"""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
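# Illustrative usage sketch (the checkpoint name is the public VQ-Diffusion release;
# the prompt and output filename are arbitrary):
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
#   image = pipe('teddy bear playing in the pool').images[0]
#   image.save('teddy_bear.png')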
| 3 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode_enabled():
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 718 |
from string import ascii_uppercase

dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    '''simple docstring'''
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    '''simple docstring'''
    cipher_text = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    '''simple docstring'''
    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    '''simple docstring'''
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f'''Encrypted Text = {s}''' )
    print(f'''Original Text = {original_text(s, key_new)}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
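# Worked example (small enough to verify by hand): with message "HI" and key "AB",
# generate_key("HI", "AB") returns "AB"; H (7) minus A (0) stays "H" and I (8) minus
# B (1) gives "H", so cipher_text("HI", "AB") == "HH", and original_text("HH", "AB")
# recovers "HI".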
| 3 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return MvpTokenizer.from_pretrained('RUCAIBox/mvp')
@cached_property
    def default_tokenizer_fast(self):
        """simple docstring"""
        return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
@require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
# Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text(self):
        """simple docstring"""
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            # check if input_ids are returned and no labels
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        """simple docstring"""
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1_024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))
@require_torch
    def test_special_tokens(self):
        """simple docstring"""
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = inputs['labels']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass
    def test_embeded_special_tokens(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 719 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
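# Illustrative value: f(x) = x**3 - 2*x - 5 has a single real root near
# x = 2.0945515, so bisection(f, 1, 1_000) converges to roughly that value under
# the 1e-7 tolerance used above.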
| 3 | 0 |
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ) -> str:
"""simple docstring"""
return pformat(self.adj_list )
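# A minimal, illustrative demo (the vertex labels are arbitrary): building a small
# undirected graph and printing its adjacency list.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}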
| 720 |
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ''

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
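# Quick sanity checks (derivable from the functions above):
#   parse_roman_numerals("XIX")   # 19
#   generate_roman_numerals(19)   # "XIX"
#   generate_roman_numerals(4)    # "IV"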
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 3 | 0 |
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ''

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 700 |
def binary_xor(a: int, b: int) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
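    # Added usage sketch: 5 = 0b101 and 3 = 0b011 differ in the two low bits,
    # so their bitwise XOR is 0b110 (decimal 6).
    print(binary_xor(5, 3))  # 0b110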
| 3 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    # Restored names and arguments follow the upstream make_student test; the
    # choice of teacher checkpoint per test below is an assumption.
    @cached_property
    def teacher_config(self):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        """simple docstring"""
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 701 |
import re
def dna_complement(dna: str) -> str:
    '''simple docstring'''
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod()
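    # Added example (the function name above is a descriptive restoration):
    # each base maps to its Watson-Crick complement, so "GTAC" -> "CATG".
    print(dna_complement("GTAC"))  # CATG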
| 3 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image:
    '''simple docstring'''
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
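# Added note: get_distance() returns the escape step scaled to [0, 1]; points
# that survive all max_step iterations return exactly 1.0 (e.g. the origin,
# get_distance(0, 0, 50) == 1.0) and are drawn black by both coloring helpers.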
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 702 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
            scheduler.set_timesteps(timesteps=timesteps)
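    # Added note: the regression constants in the full-loop tests above
    # (e.g. result_sum ~ 258.9606) come from the upstream diffusers test suite
    # and depend on the fixed torch.manual_seed(0) generator.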
| 3 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
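    # Added note: with out_features=None the backbone falls back to returning a
    # single feature map from the last stage, which the second half of the
    # checks above verifies.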
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
@unittest.skip(reason='Bit does not output attentions' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, module in model.named_modules():
if isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase = layer_type
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = BitModelTester(self)
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs, ):
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
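    # Added illustration: for a sequence pair the layout built above is
    # <s> A </s></s> B </s>, the CamemBERT/RoBERTa special-token convention,
    # and the token type ids are all zeros.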
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 3 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16_000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32_768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs, ):
        """simple docstring"""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        """simple docstring"""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log', )
        return msfc_features.T
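    # Added note: the transpose returns features as (num_frames, feature_size),
    # the time-major layout the normalization and padding code below expects.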
    def _normalize_one(self, x, input_length, padding_value):
        """simple docstring"""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        """simple docstring"""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs, ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            input_features = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                input_features, attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
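# Added note: the _LazyModule registration above defers the torch-dependent
# imports until an attribute such as GitModel is first accessed on the module.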
| 3 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = TFConvBertModel(config=A_ )
UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase = [input_ids, input_mask]
UpperCamelCase = model(A_ )
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = TFConvBertForMaskedLM(config=A_ )
UpperCamelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = TFConvBertForSequenceClassification(config=A_ )
UpperCamelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.num_choices
UpperCamelCase = TFConvBertForMultipleChoice(config=A_ )
UpperCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = TFConvBertForTokenClassification(config=A_ )
UpperCamelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = TFConvBertForQuestionAnswering(config=A_ )
UpperCamelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
UpperCamelCase = True
if hasattr(A_ , 'use_cache' ):
UpperCamelCase = True
UpperCamelCase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase = getattr(self.model_tester , 'key_length' , A_ )
for model_class in self.all_model_classes:
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model_class(A_ )
UpperCamelCase = len(model(A_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ , saved_model=A_ )
UpperCamelCase = os.path.join(A_ , 'saved_model' , '1' )
UpperCamelCase = tf.keras.models.load_model(A_ )
UpperCamelCase = model(A_ )
if self.is_encoder_decoder:
UpperCamelCase = outputs['encoder_hidden_states']
UpperCamelCase = outputs['encoder_attentions']
else:
UpperCamelCase = outputs['hidden_states']
UpperCamelCase = outputs['attentions']
self.assertEqual(len(A_ ) , A_ )
UpperCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) , A_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
UpperCamelCase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase = getattr(self.model_tester , 'key_length' , A_ )
UpperCamelCase = getattr(self.model_tester , 'key_length' , A_ )
def check_decoder_attentions_output(A_ ):
UpperCamelCase = len(A_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase = outputs.decoder_attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(A_ ):
UpperCamelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = len(A_ )
self.assertEqual(config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
if self.is_encoder_decoder:
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(config.output_hidden_states , A_ )
check_decoder_attentions_output(A_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
# Check attention is always last and order is fine
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
self.assertEqual(model.config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
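    # Added note: both axes are declared dynamic ("batch"/"sequence") so the
    # exported ONNX graph accepts variable batch sizes and sequence lengths.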
| 3 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"
    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
    config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # the renamed target key is an assumption restored from the
                    # upstream diffusers conversion script
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
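    # Added note: exactly one of do_only_config / do_only_weights /
    # do_only_renaming is expected to be enabled per run; the mapping of the
    # three boolean flags to these names is restored from the upstream script
    # and should be treated as an assumption.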
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    '''simple docstring'''
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('float64', [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('int32')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('float', [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('float', [dim])
        vb = tf.placeholder('float', [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('float', [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
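# Added note: this is TensorFlow 1.x graph-mode code (tf.placeholder,
# tf.Session, tf.initialize_all_variables, and the pre-1.0 tf.sub); running it
# on TF 2.x would require tf.compat.v1 plus tf.subtract in place of tf.sub.
# The function name TFKMeansCluster is restored from the canonical version of
# this snippet and is an assumption.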
| 3 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        """simple docstring"""
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : int = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowercase : Dict = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : str = False
__lowercase : int = False
__lowercase : Optional[int] = False
__lowercase : Optional[Any] = False
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = MobileViTVaModelTester(self )
UpperCamelCase = MobileViTVaConfigTester(self , config_class=A_ , has_text_modality=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = 5
self.assertEqual(len(A_ ) , A_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase = 2
for i in range(len(A_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
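# Worked check: `divisor` starts at 2 and doubles once per hidden state (5 of them),
# so it ends at 64 and the assertion verifies output_stride == 64 // 2 == 32 --
# consistent with the tester's default `output_stride=32` above.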
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
@slow
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = MobileViTVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(50, 60)] )
UpperCamelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A_ )
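# Post-processing note (mirrors the two calls above): with `target_sizes`, each predicted
# segmentation map is resized to the requested (height, width) -- here (50, 60); without it,
# maps keep the logits' spatial resolution (32x32, as checked in the previous test).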
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_UpperCAmelCase : Tuple = _symbol_database.Default()
_UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_UpperCAmelCase : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase : Optional[Any] = 45
_UpperCAmelCase : Any = 1_581
_UpperCAmelCase : Tuple = 1_517
_UpperCAmelCase : List[str] = 1_570
_UpperCAmelCase : int = 1_584
_UpperCAmelCase : List[Any] = 1_793
_UpperCAmelCase : Optional[int] = 1_795
_UpperCAmelCase : Any = 1_916
_UpperCAmelCase : Tuple = 1_864
_UpperCAmelCase : List[Any] = 1_905
_UpperCAmelCase : Union[str, Any] = 1_919
_UpperCAmelCase : str = 2_429
_UpperCAmelCase : Any = 2_208
_UpperCAmelCase : Dict = 2_418
_UpperCAmelCase : Optional[Any] = 2_323
_UpperCAmelCase : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
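# Minimal usage sketch (commented out; "spiece.model" is an illustrative local path, and the
# import name follows the "sentencepiece_model_pb2" module name passed to the builder above):
# the generated ModelProto message can parse a serialized SentencePiece model via the
# standard protobuf API.
#
# from sentencepiece_model_pb2 import ModelProto
# m = ModelProto()
# with open("spiece.model", "rb") as f:
#     m.ParseFromString(f.read())
# print(m.trainer_spec.vocab_size, len(m.pieces))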
| 3 | 0 |
import math
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = 0
while num > 0:
UpperCamelCase = num % 8
UpperCamelCase = octal + (remainder * math.floor(math.pow(10 , lowercase ) ))
counter += 1
UpperCamelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any
# int() in the f-string below drops the trailing '.0' produced by the float arithmetic above.
return f'''0o{int(lowercase )}'''
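# Worked example (digit trace for 65): 65 % 8 = 1, 65 // 8 = 8; 8 % 8 = 0, 8 // 8 = 1;
# 1 % 8 = 1, 1 // 8 = 0 -> digits 1, 0, 1 from least to most significant, i.e. "0o101",
# matching the decimal_to_octal(65) call printed by main() below.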
def A ( ) -> None:
'''simple docstring'''
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 708 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Check that the fake head request was indeed called
mock_head.assert_called()
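# Pattern note: patching requests.Session.request to return a 500 forces the offline code
# path, so the second from_pretrained call above must fall back to the files cached by the
# first call rather than hitting the Hub.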
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Check that the fake head request was indeed called
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024; tiny GPT-2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't do an isinstance check because the tokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't do an isinstance check because the tokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase = Trie()
UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
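# Behavior sketch (consistent with the assertions above): Trie.split is greedy longest-match,
# so with both "extra_id_1" and "extra_id_100" in the trie, the text "extra_id_100" is kept
# as a single piece rather than being split as "extra_id_1" + "00".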
| 3 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
self.test()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = 0
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ , A_=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = max([len(A_ ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
# stop early once the entire list of constraints is fulfilled
if self.completed:
break
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental
# progress on the current constraint, simply update the state.
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, we must restart.
# e.g. constraint = "I love pies" and the sequence so far is "I love", but `token_id` == "books".
# That doesn't mean we call self.init_state(): we only reset the state of this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCamelCase ( self , A_=True ) -> Tuple:
"""simple docstring"""
UpperCamelCase = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints
# objects throughout this process, so they are still in their initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
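# Minimal usage sketch (commented out; the token ids are illustrative, not from any real
# vocabulary): feeding a PhrasalConstraint's ids one by one through a ConstraintListState
# should mark the whole state as completed.
#
# state = ConstraintListState([PhrasalConstraint([5, 7, 9])])
# for tid in (5, 7, 9):
#     complete, stepped = state.add(tid)
# assert state.completed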
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
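# Round-trip sketch (commented out; "out.parquet" is an illustrative path): writing an
# in-memory Dataset and reading it back should preserve rows and features.
#
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
# ParquetDatasetWriter(ds, "out.parquet").write()        # asserted > 0 in the tests above
# reloaded = ParquetDatasetReader("out.parquet").read()  # same rows and features back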
| 3 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : str = StableDiffusionXLImgaImgPipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__lowercase : Any = PipelineTesterMixin.required_optional_params - {"latents"}
__lowercase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowercase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=A_ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=32 , )
UpperCamelCase = CLIPTextModel(A_ )
UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=A_ )
UpperCamelCase = CLIPTextModelWithProjection(A_ )
UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=A_ )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __UpperCamelCase ( self , A_ , A_=0 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = image / 2 + 0.5
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionXLImgaImgPipeline(**A_ )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = sd_pipe(**A_ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionXLImgaImgPipeline(**A_ )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
# forward without prompt embeds
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * ['this is a negative prompt']
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs['prompt']]
UpperCamelCase = sd_pipe(**A_ )
UpperCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * ['this is a negative prompt']
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = sd_pipe.encode_prompt(A_ , negative_prompt=A_ )
UpperCamelCase = sd_pipe(
**A_ , prompt_embeds=A_ , negative_prompt_embeds=A_ , pooled_prompt_embeds=A_ , negative_pooled_prompt_embeds=A_ , )
UpperCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 ) -> Any:
"""simple docstring"""
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
UpperCamelCase = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = pipe(**A_ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
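# Note on the checks above: comparisons use a small corner slice of the decoded image, and
# the tolerance (7e-3 here, 1e-2 in the fast test) absorbs minor numerical nondeterminism
# across CUDA kernels, drivers, and library versions.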
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = DonutImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
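# Size-normalization note (mirrors the assertions above): an int size produces a square
# {"height": n, "width": n}, while a legacy (width, height) tuple is converted into the
# {"height": h, "width": w} dict form.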
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A ( lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = []
for part_id in partition_order:
UpperCamelCase = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(lowercase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(100 ).repartition(1 )
UpperCamelCase = Spark(lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> Dict:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(10 ).repartition(2 )
UpperCamelCase = [1, 0]
UpperCamelCase = _generate_iterable_examples(lowercase , lowercase ) # Reverse the partitions.
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(10 ).repartition(1 )
UpperCamelCase = SparkExamplesIterable(lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowercase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
UpperCamelCase = lambda x : x.reverse()
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [2, 1, 0] )
UpperCamelCase = SparkExamplesIterable(lowercase ).shuffle_data_sources(lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCamelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(100 ).repartition(1 )
UpperCamelCase = Spark(lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCAmelCase : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512}
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(lowercase )
return pairs
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = {}
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
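# Pre-tokenization: insert a space before punctuation, surround apostrophes with spaces,
# collapse runs of whitespace, and map newlines to the special __newln__ token.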
UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ )
UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ )
UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
UpperCamelCase = token.replace('\n' , ' __newln__' )
UpperCamelCase = token.split(' ' )
UpperCamelCase = []
for token in tokens:
if not len(A_ ):
continue
UpperCamelCase = token.lower()
UpperCamelCase = tuple(A_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCamelCase = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
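# Greedy BPE: repeatedly merge the adjacent symbol pair with the lowest (best) merge rank
# until no learned merge applies or a single symbol remains.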
while True:
UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(A_ ):
try:
UpperCamelCase = word.index(A_ , A_ )
new_word.extend(word[i:j] )
UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(A_ )
UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
UpperCamelCase = get_pairs(A_ )
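# Re-join the sub-word units with the '@@ ' continuation marker, then strip the trailing
# '</w>' end-of-word marker (4 characters).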
UpperCamelCase = '@@ '.join(A_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
words.append(A_ )
return " ".join(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
UpperCamelCase = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
| 3 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Union[str, Any] = VideoToVideoSDPipeline
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
__lowercase : str = PipelineTesterMixin.required_optional_params - {"latents"}
__lowercase : int = False
# No `output_type`.
__lowercase : int = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
UpperCamelCase = CLIPTextModel(A_ )
UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __UpperCamelCase ( self , A_ , A_=0 ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = VideoToVideoSDPipeline(**A_ )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 'np'
UpperCamelCase = sd_pipe(**A_ ).frames
UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase = torch.randn((1, 10, 3, 1_024, 576) , generator=A_ )
UpperCamelCase = video.to('cuda' )
UpperCamelCase = 'Spiderman is surfing'
UpperCamelCase = pipe(A_ , video=A_ , generator=A_ , num_inference_steps=3 , output_type='pt' ).frames
UpperCamelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 712 |
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = int(lowercase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowercase )
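# Recurse on the quotient and append the remainder bit, e.g. 5 -> divmod(5, 2) = (2, 1) -> '10' + '1' = '101'.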
UpperCamelCase , UpperCamelCase = divmod(lowercase , 2 )
return binary_recursive(lowercase ) + str(lowercase )
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = str(lowercase ).strip()
if not number:
raise ValueError('No input value was provided' )
UpperCamelCase = '-' if number.startswith('-' ) else ''
UpperCamelCase = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return f'''{negative}0b{binary_recursive(int(lowercase ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
from typing import Any
def A ( lowercase ) -> list[Any]:
'''simple docstring'''
if not input_list:
return []
UpperCamelCase = [input_list.count(value ) for value in input_list]
UpperCamelCase = max(lowercase ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
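# The keys listed next live directly on the top-level HF model rather than under the
# `wav2vec2_conformer.` prefix (see how TOP_LEVEL_KEYS is consulted in the mapping loop below).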
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "inv_freq":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
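# items = (layer_id, type_id): type_id 0 addresses the conv layer itself,
# type_id 2 the (group/layer) norm attached to it.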
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
# important: change the bos & pad token ids, since the CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 0 |
def A ( lowercase ) -> Tuple:
'''simple docstring'''
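# Repeatedly pop the current minimum and maximum: the minima build the sorted prefix, the maxima
# the (reversed) suffix, and a lone leftover element slots into the middle.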
UpperCamelCase , UpperCamelCase = [], []
while len(lowercase ) > 1:
UpperCamelCase , UpperCamelCase = min(lowercase ), max(lowercase )
start.append(lowercase )
end.append(lowercase )
collection.remove(lowercase )
collection.remove(lowercase )
end.reverse()
return start + collection + end
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
_UpperCAmelCase : Dict = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
return float((preds == labels).mean() )
def A ( lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = simple_accuracy(lowercase , lowercase )
UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] )
UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 0 |
from __future__ import annotations
def A ( lowercase , lowercase ) -> tuple[int, int]:
'''simple docstring'''
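# Extended Euclid: returns (x, y) such that a * x + b * y == gcd(a, b).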
if b == 0:
return (1, 0)
((UpperCamelCase) , (UpperCamelCase)) = extended_euclid(lowercase , a % b )
UpperCamelCase = a // b
return (y, x - k * y)
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
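# Chinese remainder theorem: with n1 * x + n2 * y == 1, the value r2 * x * n1 + r1 * y * n2
# satisfies both congruences, so reducing it modulo n1 * n2 gives the unique solution.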
((UpperCamelCase) , (UpperCamelCase)) = extended_euclid(lowercase , lowercase )
UpperCamelCase = na * na
UpperCamelCase = ra * x * na + ra * y * na
return (n % m + m) % m
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
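# Modular inverse via Bezout: from a * x + n * y == 1, x (normalized into [0, n)) is a**-1 mod n.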
((UpperCamelCase) , (UpperCamelCase)) = extended_euclid(lowercase , lowercase )
if b < 0:
UpperCamelCase = (b % n + n) % n
return b
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = invert_modulo(lowercase , lowercase ), invert_modulo(lowercase , lowercase )
UpperCamelCase = na * na
UpperCamelCase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : str = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = 1
__lowercase : int = 2
__lowercase : List[Any] = 3
__lowercase : str = 4
__lowercase : Optional[Any] = 5
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : jnp.ndarray
class lowercase :
__lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME
__lowercase : Dict = ["dtype"]
__lowercase : List[Any] = []
__lowercase : Dict = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def A ( lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
assert len(lowercase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase )
def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray:
'''simple docstring'''
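# Cosine beta schedule (Nichol & Dhariwal): choose each beta so that the cumulative product of
# (1 - beta) tracks alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * pi / 2), capping every beta at max_beta.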
def alpha_bar(lowercase ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
UpperCamelCase = []
for i in range(lowercase ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) )
return jnp.array(lowercase , dtype=lowercase )
@flax.struct.dataclass
class lowercase :
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
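# Coefficients of the forward diffusion process q(x_t | x_0):
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, broadcast to the sample shape.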
UpperCamelCase = state.alphas_cumprod
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
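# v-prediction target (Salimans & Ho, 2022): v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample.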
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase : Optional[Any] = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
# sanity-check on construction that the subclass implements the Constraint interface correctly
self.test()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = 0
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ , A_=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = max([len(A_ ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCamelCase ( self , A_=True ) -> Tuple:
"""simple docstring"""
UpperCamelCase = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints
# objects throughout this process, so they are still in their initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 3 | 0 |
def A ( lowercase ) -> list[list[int]]:
'''simple docstring'''
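# Rotation scheme: pop the head, permute the remainder, append the popped value to every
# sub-permutation, then push it back so the next iteration rotates a different element to the front.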
UpperCamelCase = []
if len(lowercase ) == 1:
return [nums.copy()]
for _ in range(len(lowercase ) ):
UpperCamelCase = nums.pop(0 )
UpperCamelCase = permute(lowercase )
for perm in permutations:
perm.append(lowercase )
result.extend(lowercase )
nums.append(lowercase )
return result
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
def backtrack(lowercase ):
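# Swap-based backtracking: fix position `start`, try each remaining value there,
# recurse on start + 1, then undo the swap.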
if start == len(lowercase ) - 1:
output.append(nums[:] )
else:
for i in range(lowercase , len(lowercase ) ):
UpperCamelCase , UpperCamelCase = nums[i], nums[start]
backtrack(start + 1 )
UpperCamelCase , UpperCamelCase = nums[i], nums[start] # backtrack
UpperCamelCase = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
_UpperCAmelCase : List[str] = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None ) -> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : VQModel
__lowercase : CLIPTextModel
__lowercase : CLIPTokenizer
__lowercase : TransformeraDModel
__lowercase : LearnedClassifierFreeSamplingEmbeddings
__lowercase : VQDiffusionScheduler
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
from collections.abc import Callable
def A ( lowercase , lowercase , lowercase ) -> float:
'''simple docstring'''
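# Bisection method: given a sign change of `function` on [a, b], repeatedly halve
# the bracketing interval until the root is located to within 1e-7.
# Illustrative example: bisection(lambda x: x**3 - 2 * x - 5, 1, 1_000) ~ 2.0945514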
UpperCamelCase = a
UpperCamelCase = b
if function(lowercase ) == 0: # one of the a or b is a root for the function
return a
elif function(lowercase ) == 0:
return b
elif (
function(lowercase ) * function(lowercase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
UpperCamelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(lowercase ) == 0:
return mid
elif function(lowercase ) * function(lowercase ) < 0:
UpperCamelCase = mid
else:
UpperCamelCase = mid
UpperCamelCase = start + (end - start) / 2.0
return mid
def A ( lowercase ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 3 | 0 |
from __future__ import annotations
_UpperCAmelCase : Any = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_UpperCAmelCase : Any = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def A ( lowercase ) -> list[float]:
'''simple docstring'''
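# Brute force: for each element, scan the rest of the array for the first larger value (O(n^2)).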
UpperCamelCase = []
UpperCamelCase = len(lowercase )
for i in range(lowercase ):
UpperCamelCase = -1
for j in range(i + 1 , lowercase ):
if arr[i] < arr[j]:
UpperCamelCase = arr[j]
break
result.append(lowercase )
return result
def A ( lowercase ) -> list[float]:
'''simple docstring'''
UpperCamelCase = []
for i, outer in enumerate(lowercase ):
UpperCamelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCamelCase = inner
break
result.append(lowercase )
return result
def A ( lowercase ) -> list[float]:
'''simple docstring'''
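# Monotonic stack: traverse from the right, popping values <= arr[index] so the stack
# top is always the next greater element; overall O(n) time.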
UpperCamelCase = len(lowercase )
UpperCamelCase = []
UpperCamelCase = [-1] * arr_size
for index in reversed(range(lowercase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCamelCase = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_UpperCAmelCase : List[str] = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 720 |
import os
_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def A ( lowercase ) -> int:
'''simple docstring'''
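# Subtractive notation: a symbol smaller than its right neighbour is subtracted,
# otherwise added. Illustrative example: parse_roman_numerals('MCMXCIV') == 1_994.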
UpperCamelCase = 0
UpperCamelCase = 0
while index < len(lowercase ) - 1:
UpperCamelCase = SYMBOLS[numerals[index]]
UpperCamelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def A ( lowercase ) -> str:
'''simple docstring'''
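# Build the minimal (canonical) Roman numeral, handling the 9 and 4 special cases
# at each power of ten. Illustrative example: generate_roman_numerals(1_994) == 'MCMXCIV'.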
UpperCamelCase = ''
UpperCamelCase = num // 1_000
numerals += m_count * "M"
num %= 1_000
UpperCamelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
UpperCamelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def A ( lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
UpperCamelCase = 0
with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea:
UpperCamelCase = filea.readlines()
for line in lines:
UpperCamelCase = line.strip()
UpperCamelCase = parse_roman_numerals(lowercase )
UpperCamelCase = generate_roman_numerals(lowercase )
savings += len(lowercase ) - len(lowercase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 3 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def A ( lowercase ) -> List[str]:
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase , '_dynamo' ):
return False
return isinstance(lowercase , torch._dynamo.eval_frame.OptimizedModule )
def A ( lowercase , lowercase = True ) -> Any:
'''simple docstring'''
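# Unwrap a model from DDP/DataParallel/DeepSpeed (and torch.compile) wrappers,
# optionally restoring the original forward if a mixed-precision wrapper replaced it.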
UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase = is_compiled_module(lowercase )
if is_compiled:
UpperCamelCase = model
UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase , lowercase ):
UpperCamelCase = model.module
if not keep_fpaa_wrapper:
UpperCamelCase = getattr(lowercase , 'forward' )
UpperCamelCase = model.__dict__.pop('_original_forward' , lowercase )
if original_forward is not None:
while hasattr(lowercase , '__wrapped__' ):
UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase = forward
if getattr(lowercase , '_converted_to_transformer_engine' , lowercase ):
convert_model(lowercase , to_transformer_engine=lowercase )
if is_compiled:
UpperCamelCase = model
UpperCamelCase = compiled_model
return model
def A ( ) -> Optional[int]:
'''simple docstring'''
PartialState().wait_for_everyone()
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase , lowercase )
elif PartialState().local_process_index == 0:
torch.save(lowercase , lowercase )
@contextmanager
def A ( **lowercase ) -> Optional[Any]:
'''simple docstring'''
for key, value in kwargs.items():
UpperCamelCase = str(lowercase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def A ( lowercase ) -> str:
'''simple docstring'''
if not hasattr(lowercase , '__qualname__' ) and not hasattr(lowercase , '__name__' ):
UpperCamelCase = getattr(lowercase , '__class__' , lowercase )
if hasattr(lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase , '__name__' ):
return obj.__name__
return str(lowercase )
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
for key, value in source.items():
if isinstance(lowercase , lowercase ):
UpperCamelCase = destination.setdefault(lowercase , {} )
merge_dicts(lowercase , lowercase )
else:
UpperCamelCase = value
return destination
def A ( lowercase = None ) -> bool:
'''simple docstring'''
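# Report whether a TCP port on localhost is already in use
# (defaults to 29_500, the usual torch.distributed rendezvous port).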
if port is None:
UpperCamelCase = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 721 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
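# A dataset only counts as "small" when both sizes are known (non-zero/non-None)
# and dataset_size < in_memory_max_size; otherwise the expected result is False.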
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase )
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(lowercase )
assert result == expected
| 3 | 0 |
from collections.abc import Generator
def A ( ) -> Generator[int, None, None]:
'''simple docstring'''
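# Yields the Fibonacci numbers 1, 2, 3, 5, 8, ... (starting from F(2), with F(1) = F(2) = 1).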
UpperCamelCase , UpperCamelCase = 0, 1
while True:
UpperCamelCase , UpperCamelCase = b, a + b
yield b
def A ( lowercase = 1_000 ) -> int:
'''simple docstring'''
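# Project Euler 25: index of the first Fibonacci term with n digits.
# Illustrative example: solution(3) == 12, since F(12) = 144 is the first 3-digit term.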
UpperCamelCase = 1
UpperCamelCase = fibonacci_generator()
while len(str(next(lowercase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 700 |
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
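# Bitwise XOR of two non-negative integers, returned as a binary string.
# Illustrative example: the function returns '0b111001' for inputs 25 and 32.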
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = max(len(lowercase ) , len(lowercase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = "unispeech-sat"
def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="group" , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=128 , A_=16 , A_=False , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_=320 , A_=2 , A_=0.1 , A_=100 , A_=256 , A_=256 , A_=0.1 , A_="mean" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1_500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=504 , **A_ , ) -> int:
"""simple docstring"""
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim )
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = xvector_output_dim
@property
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 701 |
import re
def A ( lowercase ) -> str:
'''simple docstring'''
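# Watson-Crick complement: A<->T and C<->G; raises ValueError for any non-ACGT character.
# Illustrative example: 'ATCG' maps to 'TAGC'.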
if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_=0.01 , A_=1_000 ) -> str:
"""simple docstring"""
UpperCamelCase = p_stop
UpperCamelCase = max_length
def __iter__( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not stop and count < self.max_length:
yield count
count += 1
UpperCamelCase = random.random() < self.p_stop
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self , A_ , A_ , A_=False , A_=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [
BatchSamplerShard(A_ , 2 , A_ , split_batches=A_ , even_batches=A_ )
for i in range(2 )
]
UpperCamelCase = [list(A_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(A_ ) for shard in batch_sampler_shards] , [len(A_ ) for e in expected] )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ )
UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A_ , A_ )
UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A_ , A_ )
UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A_ , A_ )
UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is very small.
UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A_ , A_ )
UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [[], []]
self.check_batch_sampler_shards(A_ , A_ )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is very small.
UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [[], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is very small.
UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
UpperCamelCase = [[], []]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is very small.
UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = [[], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
UpperCamelCase = [BatchSamplerShard(A_ , 2 , A_ , even_batches=A_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_=False , A_=2 , A_=False ) -> Union[str, Any]:
"""simple docstring"""
random.seed(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = [
IterableDatasetShard(
A_ , batch_size=A_ , drop_last=A_ , num_processes=A_ , process_index=A_ , split_batches=A_ , )
for i in range(A_ )
]
UpperCamelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(A_ )
iterable_dataset_lists.append(list(A_ ) )
UpperCamelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
UpperCamelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(A_ ) , len(A_ ) )
self.assertTrue(len(A_ ) % shard_batch_size == 0 )
UpperCamelCase = []
for idx in range(0 , len(A_ ) , A_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(A_ ) < len(A_ ):
reference += reference
self.assertListEqual(A_ , reference[: len(A_ )] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 42
UpperCamelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
# Edge case with a very small dataset
UpperCamelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=A_ )
UpperCamelCase = SkipBatchSampler(A_ , 2 )
self.assertListEqual(list(A_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 )
UpperCamelCase = skip_first_batches(A_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(A_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
Accelerator()
UpperCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(A_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 702 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = (DDPMScheduler,)
def __UpperCamelCase ( self , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(A_ )
UpperCamelCase = prev_t.item()
self.assertEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(A_ )
with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A_ )
| 3 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
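# Helper: assert the loaded dataset has the expected shape, column names and feature dtypes.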
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
_UpperCAmelCase : Tuple = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
_UpperCAmelCase : List[str] = TaTokenizerFast
_UpperCAmelCase : Tuple = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = dataset
UpperCamelCase = process
UpperCamelCase = params
def __len__( self ) -> int:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = self.dataset[i]
UpperCamelCase = self.process(A_ , **self.params )
return processed
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ , A_=None ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = loader
UpperCamelCase = infer
UpperCamelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
UpperCamelCase = None
UpperCamelCase = loader_batch_size
# Internal bookkeeping
UpperCamelCase = None
UpperCamelCase = None
def __len__( self ) -> int:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = iter(self.loader )
return self
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
UpperCamelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
UpperCamelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(A_ , A_ ):
# Convert ModelOutput to tuple first
UpperCamelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(A_ , A_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
UpperCamelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCamelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
UpperCamelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
UpperCamelCase = self._loader_batch_data.__class__(A_ )
self._loader_batch_index += 1
return result
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCamelCase = next(self.iterator )
UpperCamelCase = self.infer(A_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(A_ , torch.Tensor ):
UpperCamelCase = processed
else:
UpperCamelCase = list(processed.keys() )[0]
UpperCamelCase = processed[key]
if isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                # This could be the last batch, so we can't unroll
                # as many elements.
UpperCamelCase = observed_batch_size
# Setting internal index to unwrap the batch
UpperCamelCase = processed
UpperCamelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ , A_=None ) -> Optional[Any]:
"""simple docstring"""
super().__init__(A_ , A_ , A_ )
def __iter__( self ) -> str:
"""simple docstring"""
UpperCamelCase = iter(self.loader )
UpperCamelCase = None
return self
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
if self.subiterator is None:
UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
UpperCamelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item:
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it is that we're basically flattening lists
            # of lists into a single list, but with generators.
UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
UpperCamelCase = next(self.subiterator )
return processed
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __iter__( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = iter(self.loader )
return self
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCamelCase = self.loader_batch_item()
UpperCamelCase = item.pop('is_last' )
accumulator.append(A_ )
if is_last:
return accumulator
while not is_last:
UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(A_ , torch.Tensor ):
UpperCamelCase = processed
else:
UpperCamelCase = list(processed.keys() )[0]
UpperCamelCase = processed[key]
if isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                    # This could be the last batch, so we can't unroll
                    # as many elements.
UpperCamelCase = observed_batch_size
UpperCamelCase = processed
UpperCamelCase = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCamelCase = self.loader_batch_item()
UpperCamelCase = item.pop('is_last' )
accumulator.append(A_ )
if is_last:
return accumulator
else:
UpperCamelCase = processed
UpperCamelCase = item.pop('is_last' )
accumulator.append(A_ )
return accumulator
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = dataset
UpperCamelCase = key
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.dataset[i][self.key]
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dataset
UpperCamelCase = keya
UpperCamelCase = keya
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , A_ ) -> int:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
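# Illustrative sketch of how a PretrainedConfig subclass like the one above is
# typically used: instantiate with overrides, then round-trip through
# save_pretrained / from_pretrained. `_ToyConfig` and the "./toy-config"
# directory are hypothetical names chosen only for this example.
class _ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size=768, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size


if __name__ == "__main__":
    cfg = _ToyConfig(hidden_size=256)
    cfg.save_pretrained("./toy-config")  # writes ./toy-config/config.json
    reloaded = _ToyConfig.from_pretrained("./toy-config")
    assert reloaded.hidden_size == 256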
| 3 | 0 |
import re
def A ( lowercase ) -> str:
'''simple docstring'''
if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ):
raise ValueError('Invalid Strand' )
    return lowercase.translate(lowercase.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
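# Illustrative sanity check of the complement function above: each base is
# swapped with its Watson-Crick partner, position by position.
if __name__ == "__main__":
    assert A('ATCG' ) == 'TAGC'
    assert A('AATCG' ) == 'TTAGC'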
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
UpperCamelCase = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase = tf.placeholder('float64' , [dim] )
UpperCamelCase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase = tf.placeholder('int32' )
UpperCamelCase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
        ##Now let's construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase = tf.placeholder('float' , [noofclusters] )
UpperCamelCase = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
UpperCamelCase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
UpperCamelCase = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase = sess.run(lowercase )
UpperCamelCase = sess.run(lowercase )
return centroids, assignments
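# Illustrative usage sketch (comments only, not executed here): the routine
# above targets the legacy TF1 API (tf.Session, tf.sub,
# tf.initialize_all_variables), so a matching old TensorFlow build is assumed.
# With that in place, it would be driven on, e.g., two well-separated blobs:
#
#     toy_vectors = array([[0.0, 0.0], [0.1, 0.2], [9.0, 9.0], [9.1, 8.9]])
#     centroids, assignments = A(toy_vectors, 2)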
| 3 | 0 |
def A ( lowercase ) -> int:
'''simple docstring'''
stooge(lowercase , 0 , len(lowercase ) - 1 )
return arr
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase , UpperCamelCase = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        UpperCamelCase = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase , lowercase , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase , i + t , (lowercase) )
# Recursively sort first 2/3 elements
stooge(lowercase , lowercase , (h - t) )
if __name__ == "__main__":
_UpperCAmelCase : int = input("Enter numbers separated by a comma:\n").strip()
_UpperCAmelCase : Optional[Any] = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
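# Note on complexity: stooge sort recurses three times on two-thirds of the
# array, so T(n) = 3 T(2n/3) + O(1), which solves to roughly O(n^2.71). It is
# a teaching curiosity rather than a practical sorting algorithm.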
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_UpperCAmelCase : Tuple = _symbol_database.Default()
_UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_UpperCAmelCase : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = b"H\003"
    # (generated by the protobuf compiler, but `_TRAINERSPEC` is not defined in this file)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase : Optional[Any] = 45
_UpperCAmelCase : Any = 1_581
_UpperCAmelCase : Tuple = 1_517
_UpperCAmelCase : List[str] = 1_570
_UpperCAmelCase : int = 1_584
_UpperCAmelCase : List[Any] = 1_793
_UpperCAmelCase : Optional[int] = 1_795
_UpperCAmelCase : Any = 1_916
_UpperCAmelCase : Tuple = 1_864
_UpperCAmelCase : List[Any] = 1_905
_UpperCAmelCase : Union[str, Any] = 1_919
_UpperCAmelCase : str = 2_429
_UpperCAmelCase : Any = 2_208
_UpperCAmelCase : Dict = 2_418
_UpperCAmelCase : Optional[Any] = 2_323
_UpperCAmelCase : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 3 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_UpperCAmelCase : Tuple = None
try:
import msvcrt
except ImportError:
_UpperCAmelCase : List[str] = None
try:
import fcntl
except ImportError:
_UpperCAmelCase : int = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_UpperCAmelCase : Optional[int] = OSError
# Data
# ------------------------------------------------
_UpperCAmelCase : int = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_UpperCAmelCase : List[str] = "3.0.12"
_UpperCAmelCase : str = None
def A ( ) -> Dict:
'''simple docstring'''
global _logger
UpperCamelCase = _logger or logging.getLogger(__name__ )
return _logger
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = lock_file
return None
def __str__( self ) -> Any:
"""simple docstring"""
UpperCamelCase = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class lowercase :
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = lock
return None
def __enter__( self ) -> Optional[int]:
"""simple docstring"""
return self.lock
def __exit__( self , A_ , A_ , A_ ) -> List[str]:
"""simple docstring"""
self.lock.release()
return None
class lowercase :
def __init__( self , A_ , A_=-1 , A_=None ) -> int:
"""simple docstring"""
UpperCamelCase = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
UpperCamelCase = self.hash_filename_if_too_long(A_ , A_ )
# The path to the lock file.
UpperCamelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
        # It is not None only while the object currently holds
        # the lock.
UpperCamelCase = None
# The default timeout value.
UpperCamelCase = timeout
# We use this lock primarily for the lock counter.
UpperCamelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
UpperCamelCase = 0
return None
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self._lock_file
@property
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self._timeout
@timeout.setter
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = float(A_ )
return None
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError()
@property
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self._lock_file_fd is not None
def __UpperCamelCase ( self , A_=None , A_=0.05 ) -> List[str]:
"""simple docstring"""
# Use the default timeout, if no timeout is provided.
if timeout is None:
UpperCamelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCamelCase = id(self )
UpperCamelCase = self._lock_file
UpperCamelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(A_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCamelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __UpperCamelCase ( self , A_=False ) -> Any:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCamelCase = id(self )
UpperCamelCase = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
UpperCamelCase = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ) -> List[str]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self , A_ , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
self.release()
return None
def __del__( self ) -> str:
"""simple docstring"""
self.release(force=A_ )
return None
def __UpperCamelCase ( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = os.path.basename(A_ )
if len(A_ ) > max_length and max_length > 0:
UpperCamelCase = os.path.dirname(A_ )
UpperCamelCase = str(hash(A_ ) )
UpperCamelCase = filename[: max_length - len(A_ ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(A_ , A_ )
else:
return path
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_=-1 , A_=None ) -> List[str]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
UpperCamelCase = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
UpperCamelCase = os.open(self._lock_file , A_ )
except OSError:
pass
else:
try:
msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(A_ )
else:
UpperCamelCase = fd
return None
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self._lock_file_fd
UpperCamelCase = None
msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 )
os.close(A_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_=-1 , A_=None ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = os.statvfs(os.path.dirname(A_ ) ).f_namemax
super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
UpperCamelCase = os.open(self._lock_file , A_ )
try:
fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(A_ )
else:
UpperCamelCase = fd
return None
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self._lock_file_fd
UpperCamelCase = None
fcntl.flock(A_ , fcntl.LOCK_UN )
os.close(A_ )
return None
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
UpperCamelCase = os.open(self._lock_file , A_ )
except OSError:
pass
else:
UpperCamelCase = fd
return None
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
os.close(self._lock_file_fd )
UpperCamelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_UpperCAmelCase : str = None
if msvcrt:
_UpperCAmelCase : Tuple = WindowsFileLock
elif fcntl:
_UpperCAmelCase : Any = UnixFileLock
else:
_UpperCAmelCase : int = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 708 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase = Trie()
UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
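# Illustrative example of the longest-match splitting the tests above verify,
# using the same Trie imported from transformers.tokenization_utils:
if __name__ == "__main__":
    demo = Trie()
    demo.add('[CLS]' )
    demo.add('extra_id_100' )
    print(demo.split('[CLS] This is a extra_id_100' ) )
    # -> ['[CLS]', ' This is a ', 'extra_id_100']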
| 3 | 0 |
import math
import sys
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
try:
with open(lowercase , 'rb' ) as binary_file:
UpperCamelCase = binary_file.read()
for dat in data:
UpperCamelCase = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = {'0': '0', '1': '1'}
UpperCamelCase , UpperCamelCase = '', ''
UpperCamelCase = len(lowercase )
for i in range(len(lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase = lexicon[curr_string]
result += last_match_id
UpperCamelCase = last_match_id + '0'
if math.loga(lowercase ).is_integer():
UpperCamelCase = {}
for curr_key in list(lowercase ):
UpperCamelCase = lexicon.pop(lowercase )
UpperCamelCase = new_lex
UpperCamelCase = last_match_id + '1'
index += 1
UpperCamelCase = ''
return result
def A ( lowercase , lowercase ) -> None:
'''simple docstring'''
UpperCamelCase = 8
try:
with open(lowercase , 'wb' ) as opened_file:
UpperCamelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase ) , lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase = data_bits[counter:]
UpperCamelCase = data_bits[counter + 1 :]
return data_bits
def A ( lowercase , lowercase ) -> None:
'''simple docstring'''
UpperCamelCase = read_file_binary(lowercase )
UpperCamelCase = remove_prefix(lowercase )
UpperCamelCase = decompress_data(lowercase )
write_file_binary(lowercase , lowercase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
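# Usage note (illustrative, file names hypothetical): the entry point expects
# a source file produced by the matching compressor and a destination path,
# e.g. `python this_script.py archive.lz restored.bin`. During decoding,
# whenever the lexicon size reaches a power of two, existing codes appear to
# be re-keyed with a leading 0 bit, mirroring the growing code width used on
# the compression side.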
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
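# Illustrative round-trip through the public API that the readers/writers
# above back: Dataset.to_parquet and Dataset.from_parquet. The /tmp path is
# hypothetical.
if __name__ == "__main__":
    ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]} )
    ds.to_parquet('/tmp/demo.parquet' )
    reloaded = Dataset.from_parquet('/tmp/demo.parquet' )
    assert reloaded.column_names == ds.column_names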
| 3 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self ) -> Any:
"""simple docstring"""
UpperCamelCase = ''
UpperCamelCase = ''
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = 256
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = cva.imread(A_ , 0 )
UpperCamelCase = copy.deepcopy(self.img )
UpperCamelCase , UpperCamelCase , UpperCamelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCamelCase = np.sum(A_ )
for i in range(len(A_ ) ):
UpperCamelCase = x[i] / self.k
self.sk += prk
UpperCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
UpperCamelCase = int(last % last )
UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_UpperCAmelCase : Any = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
_UpperCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = DonutImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
UpperCamelCase = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
UpperCamelCase = model.state_dict()
def to_tf_var_name(lowercase ):
for patt, repl in iter(lowercase ):
UpperCamelCase = name.replace(lowercase , lowercase )
return f'''bert/{name}'''
def create_tf_var(lowercase , lowercase , lowercase ):
UpperCamelCase = tf.dtypes.as_dtype(tensor.dtype )
UpperCamelCase = tf.get_variable(dtype=lowercase , shape=tensor.shape , name=lowercase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowercase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCamelCase = to_tf_var_name(lowercase )
UpperCamelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCamelCase = torch_tensor.T
UpperCamelCase = create_tf_var(tensor=lowercase , name=lowercase , session=lowercase )
tf.keras.backend.set_value(lowercase , lowercase )
UpperCamelCase = session.run(lowercase )
print(f'''Successfully created {tf_name}: {np.allclose(lowercase , lowercase )}''' )
UpperCamelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(lowercase , os.path.join(lowercase , model_name.replace('-' , '_' ) + '.ckpt' ) )
def A ( lowercase=None ) -> List[str]:
'''simple docstring'''
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=lowercase , required=lowercase , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=lowercase , default=lowercase , required=lowercase , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=lowercase , required=lowercase , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=lowercase , required=lowercase , help='Directory in which to save tensorflow model' )
UpperCamelCase = parser.parse_args(lowercase )
UpperCamelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
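# Illustrative invocation (paths hypothetical; the flags come from the
# argparse definition above):
#   python this_script.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt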
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCAmelCase : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512}
def A ( lowercase ) -> Optional[Any]:
    '''Return the set of adjacent symbol pairs in a word (an iterable of symbols).'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(lowercase )
return pairs
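# e.g. get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}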
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = {}
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ )
UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ )
UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
UpperCamelCase = token.replace('\n' , ' __newln__' )
UpperCamelCase = token.split(' ' )
UpperCamelCase = []
for token in tokens:
if not len(A_ ):
continue
UpperCamelCase = token.lower()
UpperCamelCase = tuple(A_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCamelCase = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(A_ ):
try:
UpperCamelCase = word.index(A_ , A_ )
new_word.extend(word[i:j] )
UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(A_ )
UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
UpperCamelCase = get_pairs(A_ )
UpperCamelCase = '@@ '.join(A_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
words.append(A_ )
return " ".join(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
UpperCamelCase = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
| 3 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Any = direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_UpperCAmelCase : str = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
 # not used in modeling files, but it is important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
 # used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
 # `ignore_value` used during training (even though we don't have a training script for these models yet)
 # `norm` used in conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict:
    '''Return True if any variant of the attribute is used in the modeling sources or is an allowed special case.'''
UpperCamelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"config.{attribute}" in modeling_source
or f"getattr(config, \"{attribute}\"" in modeling_source
or f"getattr(self.config, \"{attribute}\"" in modeling_source
):
UpperCamelCase = True
# Deal with multi-line cases
elif (
re.search(
Rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , lowercase , )
is not None
):
UpperCamelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCamelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCamelCase = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
UpperCamelCase = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
UpperCamelCase = True
if not attribute_used:
UpperCamelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCamelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCamelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCamelCase = True
elif attribute.endswith('_token_id' ):
UpperCamelCase = True
# configuration class specific cases
if not case_allowed:
UpperCamelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCamelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A ( lowercase ) -> List[Any]:
    '''Collect the __init__ parameters of a config class that are never used in its modeling files.'''
UpperCamelCase = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCamelCase = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
UpperCamelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCamelCase = {}
if len(config_class.attribute_map ) > 0:
UpperCamelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCamelCase = inspect.getsourcefile(lowercase )
UpperCamelCase = os.path.dirname(lowercase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCamelCase = [os.path.join(lowercase , lowercase ) for fn in os.listdir(lowercase ) if fn.startswith('modeling_' )]
# Get the source code strings
UpperCamelCase = []
for path in modeling_paths:
if os.path.isfile(lowercase ):
with open(lowercase ) as fp:
modeling_sources.append(fp.read() )
UpperCamelCase = []
for config_param, default_value in zip(lowercase , lowercase ):
# `attributes` here is all the variant names for `config_param`
UpperCamelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowercase , lowercase , lowercase , lowercase ):
unused_attributes.append(attributes[0] )
return sorted(lowercase )
def A ( ) -> Tuple:
    '''Check every configuration class and raise a ValueError listing any unused attributes.'''
UpperCamelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCamelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowercase : inspect.isclass(lowercase )
and issubclass(lowercase , lowercase )
and inspect.getmodule(lowercase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCamelCase = check_config_attributes_being_used(lowercase )
if len(lowercase ) > 0:
UpperCamelCase = unused_attributes
if len(lowercase ) > 0:
UpperCamelCase = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += f"{name}: {attributes}\n"
raise ValueError(lowercase )
if __name__ == "__main__":
check_config_attributes()
| 712 |
def A ( lowercase ) -> str:
    '''Recursively build the binary digits of a non-negative integer as a string.'''
UpperCamelCase = int(lowercase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowercase )
UpperCamelCase , UpperCamelCase = divmod(lowercase , 2 )
return binary_recursive(lowercase ) + str(lowercase )
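# e.g. 11 -> "1011": divmod produces remainders 1, 1, 0 least-significant first,
# and the recursion prepends the higher-order bits before each remainder.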
def A ( lowercase ) -> str:
    '''Validate the input and return its binary representation with a 0b prefix (sign preserved).'''
UpperCamelCase = str(lowercase ).strip()
if not number:
raise ValueError('No input value was provided' )
UpperCamelCase = '-' if number.startswith('-' ) else ''
UpperCamelCase = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return f'''{negative}0b{binary_recursive(int(lowercase ) )}'''
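# e.g. an input of "-11" yields "-0b1011": the sign is preserved and the 0b
# prefix is added to the recursive conversion of the magnitude.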
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_UpperCAmelCase : List[Any] = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class lowercase :
__lowercase : str
__lowercase : Optional[str] = None
__lowercase : Optional[Union[str, int]] = None
__lowercase : Optional[Union[str, int]] = None
__lowercase : Optional[Union[str, int]] = None
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Optional[Any]:
"""simple docstring"""
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self.major, self.minor, self.patch
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if isinstance(A_ , A_ ):
return Version(A_ )
elif isinstance(A_ , A_ ):
return other
raise TypeError(F'''{other} (type {type(A_ )}) cannot be compared to version.''' )
def __eq__( self , A_ ) -> int:
"""simple docstring"""
try:
UpperCamelCase = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self ) -> List[Any]:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self.version_str
def A ( lowercase ) -> str:
    '''Parse an "x.y.z" version string into a (major, minor, patch) tuple of ints.'''
UpperCamelCase = _VERSION_REG.match(lowercase )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(lowercase ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def A ( lowercase ) -> int:
    '''Join a (major, minor, patch) version tuple back into an "x.y.z" string.'''
return ".".join(str(lowercase ) for v in version_tuple )
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
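# A "*" in a mapped key stands for the encoder layer index; it is substituted
# with the index parsed from the fairseq weight name when the state dict is
# mapped onto the HF model below.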
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
    '''Copy a fairseq tensor into the HF model attribute addressed by key, verifying shapes first.'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "inv_freq":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
    '''Map every weight in the fairseq state dict onto the HF Wav2Vec2-Conformer model.'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
    '''Load one feature-extractor conv layer (conv or layer-norm weight/bias) from fairseq.'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
    '''Convert a fairseq Wav2Vec2-Conformer checkpoint to the HF format and save it to disk.'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase ( enum.Enum ):
__lowercase : List[Any] = 0
__lowercase : Dict = 1
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Any = "generated"
def __init__( self , *A_ , **A_ ) -> Optional[int]:
"""simple docstring"""
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __UpperCamelCase ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , ) -> Dict:
"""simple docstring"""
UpperCamelCase = {}
if truncation is not None:
UpperCamelCase = truncation
UpperCamelCase = generate_kwargs
UpperCamelCase = {}
if return_tensors is not None and return_type is None:
UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase = self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
return True
def __UpperCamelCase ( self , *A_ , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
UpperCamelCase = ([prefix + arg for arg in args[0]],)
UpperCamelCase = True
elif isinstance(args[0] , A_ ):
UpperCamelCase = (prefix + args[0],)
UpperCamelCase = False
else:
raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
UpperCamelCase = self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __UpperCamelCase ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def __UpperCamelCase ( self , A_ , **A_ ) -> int:
"""simple docstring"""
if self.framework == "pt":
UpperCamelCase , UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
UpperCamelCase , UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
UpperCamelCase = self.model.generate(**A_ , **A_ )
UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCamelCase = output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCamelCase = tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __UpperCamelCase ( self , A_ , A_=ReturnType.TEXT , A_=False ) -> Dict:
"""simple docstring"""
UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCamelCase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCamelCase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[int] = "summary"
def __call__( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
return super().__call__(*A_ , **A_ )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> bool:
"""simple docstring"""
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : int = "translation"
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def __UpperCamelCase ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None ) -> Optional[int]:
"""simple docstring"""
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def __UpperCamelCase ( self , A_=None , A_=None , **A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = super()._sanitize_parameters(**A_ )
if src_lang is not None:
UpperCamelCase = src_lang
if tgt_lang is not None:
UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCamelCase = kwargs.get('task' , self.task )
UpperCamelCase = task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
UpperCamelCase = items[1]
UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
return super().__call__(*A_ , **A_ )
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A ( lowercase , lowercase ) -> List[str]:
    '''Fraction of predictions that exactly match the reference labels.'''
return float((preds == labels).mean() )
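# e.g. with numpy arrays preds=[0, 1, 1] and labels=[0, 1, 0] this returns 2/3.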
def A ( lowercase , lowercase ) -> Tuple:
    '''Accuracy together with the binary F1 score.'''
UpperCamelCase = simple_accuracy(lowercase , lowercase )
UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( lowercase , lowercase ) -> Optional[int]:
    '''Pearson and Spearman correlation coefficients between predictions and references.'''
UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] )
UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[int] = "deit"
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1e-12 , A_=224 , A_=16 , A_=3 , A_=True , A_=16 , **A_ , ) -> str:
"""simple docstring"""
super().__init__(**A_ )
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = qkv_bias
UpperCamelCase = encoder_stride
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = version.parse("1.11" )
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCamelCase ( self ) -> float:
"""simple docstring"""
return 1e-4
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : str = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = 1
__lowercase : int = 2
__lowercase : List[Any] = 3
__lowercase : str = 4
__lowercase : Optional[Any] = 5
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : jnp.ndarray
class lowercase :
__lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME
__lowercase : Dict = ["dtype"]
__lowercase : List[Any] = []
__lowercase : Dict = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def A ( lowercase , lowercase ) -> jnp.ndarray:
    '''Broadcast x to `shape` by first appending trailing singleton axes to x.'''
assert len(lowercase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase )
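# e.g. x of shape (batch,) broadcast against shape (batch, height, width) is
# first reshaped to (batch, 1, 1), aligning x with the leading axes of shape.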
def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray:
    '''Build a discrete beta schedule from the cumulative alpha_bar (cosine) function.'''
def alpha_bar(lowercase ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
UpperCamelCase = []
for i in range(lowercase ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) )
return jnp.array(lowercase , dtype=lowercase )
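# This is the squared-cosine ("squaredcos_cap_v2") schedule: each beta is
# 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta (0.999 here).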
@flax.struct.dataclass
class lowercase :
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
    '''Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) for the given timesteps, broadcast to the sample shape.'''
UpperCamelCase = state.alphas_cumprod
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict:
    '''Diffuse original_samples to the given timesteps by mixing them with noise.'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
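# Forward diffusion step: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.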
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
    '''Compute the velocity (v-prediction) training target for the given sample and noise.'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
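# Velocity ("v-prediction") target: v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0.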
| 3 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowercase :
def __init__( self , A_ , A_ = 13 , A_ = 64 , A_ = 2 , A_ = 3 , A_ = 3 , A_ = True , A_ = True , A_ = 128 , A_=[16, 32, 64, 128] , A_ = 7 , A_ = 4 , A_ = 37 , A_ = "gelu" , A_ = 0.1 , A_ = 0.1 , A_ = 10 , A_ = 0.02 , A_ = 2 , A_ = 1 , A_ = 128 , A_ = [2, 2, 2, 2] , A_ = 2 , A_ = 2 , ) -> int:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = encoder_stride
UpperCamelCase = num_attention_outputs
UpperCamelCase = embed_dim
UpperCamelCase = embed_dim + 1
UpperCamelCase = resolution
UpperCamelCase = depths
UpperCamelCase = hidden_sizes
UpperCamelCase = dim
UpperCamelCase = mlp_expansion_ratio
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TFEfficientFormerModel(config=A_ )
UpperCamelCase = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = TFEfficientFormerForImageClassification(A_ )
UpperCamelCase = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = TFEfficientFormerForImageClassification(A_ )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__lowercase : Optional[int] = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__lowercase : List[Any] = False
__lowercase : int = False
__lowercase : int = False
__lowercase : str = False
__lowercase : Dict = False
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = TFEfficientFormerModelTester(self )
UpperCamelCase = ConfigTester(
self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) , A_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
UpperCamelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
UpperCamelCase = seq_length * self.model_tester.chunk_length
else:
UpperCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
UpperCamelCase = outputs.decoder_hidden_states
                self.assertIsInstance(A_ , (list, tuple) )
self.assertEqual(len(A_ ) , A_ )
UpperCamelCase = getattr(self.model_tester , 'seq_length' , A_ )
UpperCamelCase = getattr(self.model_tester , 'decoder_seq_length' , A_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __UpperCamelCase ( self , A_ , A_ , A_=False ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFEfficientFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
UpperCamelCase = getattr(self.model_tester , 'seq_length' , A_ )
UpperCamelCase = getattr(self.model_tester , 'encoder_seq_length' , A_ )
UpperCamelCase = getattr(self.model_tester , 'key_length' , A_ )
UpperCamelCase = getattr(self.model_tester , 'chunk_length' , A_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
UpperCamelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
UpperCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
UpperCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCamelCase = model_class(A_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCamelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=A_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCamelCase = model(A_ )
self.assertTrue(outputs_dict is not None )
def A ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='tf' )
# forward pass
UpperCamelCase = model(**A_ , training=A_ )
# verify the logits
UpperCamelCase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='tf' )
# forward pass
UpperCamelCase = model(**A_ , training=A_ )
# verify the logits
UpperCamelCase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
# test for the above condition
self.test()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F'''`token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = 0
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ , A_=True ) -> List[Any]:
"""simple docstring"""
        UpperCamelCase = max([len(one ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
                # the entire list of constraints is fulfilled
if self.completed:
break
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCamelCase ( self , A_=True ) -> Tuple:
"""simple docstring"""
        UpperCamelCase = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 3 | 0 |
def A ( lowercase ) -> bool:
'''simple docstring'''
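    # Automorphic number check: a number is automorphic when its square ends with the number
    # itself (e.g. 5 -> 25, 76 -> 5776), compared digit by digit from the right.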
if not isinstance(lowercase , lowercase ):
UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase )
if number < 0:
return False
UpperCamelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None ) -> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : VQModel
__lowercase : CLIPTextModel
__lowercase : CLIPTokenizer
__lowercase : TransformeraDModel
__lowercase : LearnedClassifierFreeSamplingEmbeddings
__lowercase : VQDiffusionScheduler
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase : Dict = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
from string import ascii_uppercase
_UpperCAmelCase : Dict = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCAmelCase : Tuple = dict(enumerate(ascii_uppercase))
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
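    # Extend the key by cycling through its own characters until it is as long as the message.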
UpperCamelCase = len(lowercase )
UpperCamelCase = 0
while True:
if x == i:
UpperCamelCase = 0
if len(lowercase ) == len(lowercase ):
break
key += key[i]
i += 1
return key
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
UpperCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
UpperCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def A ( ) -> None:
'''simple docstring'''
UpperCamelCase = 'THE GERMAN ATTACK'
UpperCamelCase = 'SECRET'
UpperCamelCase = generate_key(lowercase , lowercase )
UpperCamelCase = cipher_text(lowercase , lowercase )
print(f'''Encrypted Text = {s}''' )
print(f'''Original Text = {original_text(lowercase , lowercase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 3 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=3 , A_=32 , A_=3 , A_=10 , A_=[10, 20, 30, 40] , A_=[1, 1, 2, 1] , A_=True , A_=True , A_="relu" , A_=3 , A_=None , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = embeddings_size
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = len(A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = self.get_config()
return config, pixel_values
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCamelCase ( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = FlaxRegNetModel(config=A_ )
UpperCamelCase = model(A_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = FlaxRegNetForImageClassification(config=A_ )
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__lowercase : int = False
__lowercase : Dict = False
__lowercase : Optional[Any] = False
def __UpperCamelCase ( self ) -> None:
"""simple docstring"""
UpperCamelCase = FlaxRegNetModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model_class(A_ )
@jax.jit
def model_jitted(A_ , **A_ ):
return model(pixel_values=A_ , **A_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase = model_jitted(**A_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='np' )
UpperCamelCase = model(**A_ )
# verify the logits
UpperCamelCase = (1, 1_000)
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 719 |
from collections.abc import Callable
def A ( lowercase , lowercase , lowercase ) -> float:
'''simple docstring'''
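    # Bisection method: given a continuous function on [a, b] whose endpoint values have opposite
    # signs, repeatedly halve the interval until the midpoint is within 10^-7 of a root.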
UpperCamelCase = a
UpperCamelCase = b
    if function(lowercase ) == 0: # one of a or b is a root of the function
return a
elif function(lowercase ) == 0:
return b
elif (
function(lowercase ) * function(lowercase ) > 0
    ): # if neither a nor b is a root and the function values at both endpoints have the same sign,
        # then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
UpperCamelCase = start + (end - start) / 2.0
    while abs(start - mid ) > 10**-7: # iterate until the interval is narrower than 10^-7
if function(lowercase ) == 0:
return mid
elif function(lowercase ) * function(lowercase ) < 0:
UpperCamelCase = mid
else:
UpperCamelCase = mid
UpperCamelCase = start + (end - start) / 2.0
return mid
def A ( lowercase ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 3 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 720 |
import os
_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def A ( lowercase ) -> int:
'''simple docstring'''
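    # Convert a Roman numeral string to an integer; a smaller value before a larger one is
    # subtracted (handles subtractive pairs such as IV, IX, XL, CM).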
UpperCamelCase = 0
UpperCamelCase = 0
while index < len(lowercase ) - 1:
UpperCamelCase = SYMBOLS[numerals[index]]
UpperCamelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def A ( lowercase ) -> str:
'''simple docstring'''
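    # Build the shortest Roman numeral for num, using subtractive notation (CM, CD, XC, XL, IX, IV)
    # where it saves characters.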
UpperCamelCase = ''
UpperCamelCase = num // 1_000
numerals += m_count * "M"
num %= 1_000
UpperCamelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
UpperCamelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def A ( lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
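    # Sum, over all numerals in the input file, the characters saved by rewriting each numeral
    # in its minimal (subtractive) form.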
UpperCamelCase = 0
with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea:
UpperCamelCase = filea.readlines()
for line in lines:
UpperCamelCase = line.strip()
UpperCamelCase = parse_roman_numerals(lowercase )
UpperCamelCase = generate_roman_numerals(lowercase )
savings += len(lowercase ) - len(lowercase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 3 | 0 |
def A ( ) -> str:
'''simple docstring'''
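    # Yield the triangle numbers 1, 3, 6, 10, ... (the n-th term is n * (n + 1) / 2).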
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def A ( lowercase ) -> List[Any]:
'''simple docstring'''
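    # Count the divisors of n via prime factorisation: the divisor count is the product of
    # (multiplicity + 1) over all prime factors.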
UpperCamelCase = 1
UpperCamelCase = 2
while i * i <= n:
UpperCamelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def A ( ) -> Any:
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(lowercase ) > 500 )
if __name__ == "__main__":
print(solution())
| 721 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase )
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(lowercase )
assert result == expected
| 3 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[int] = "efficientformer"
def __init__( self , A_ = [3, 2, 6, 4] , A_ = [48, 96, 224, 448] , A_ = [True, True, True, True] , A_ = 448 , A_ = 32 , A_ = 4 , A_ = 7 , A_ = 5 , A_ = 8 , A_ = 4 , A_ = 0.0 , A_ = 16 , A_ = 3 , A_ = 3 , A_ = 3 , A_ = 2 , A_ = 1 , A_ = 0.0 , A_ = 1 , A_ = True , A_ = True , A_ = 1e-5 , A_ = "gelu" , A_ = 0.02 , A_ = 1e-12 , A_ = 224 , A_ = 1e-05 , **A_ , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = hidden_sizes
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = depths
UpperCamelCase = mlp_expansion_ratio
UpperCamelCase = downsamples
UpperCamelCase = dim
UpperCamelCase = key_dim
UpperCamelCase = attention_ratio
UpperCamelCase = resolution
UpperCamelCase = pool_size
UpperCamelCase = downsample_patch_size
UpperCamelCase = downsample_stride
UpperCamelCase = downsample_pad
UpperCamelCase = drop_path_rate
UpperCamelCase = num_metaad_blocks
UpperCamelCase = distillation
UpperCamelCase = use_layer_scale
UpperCamelCase = layer_scale_init_value
UpperCamelCase = image_size
UpperCamelCase = batch_norm_eps
| 700 |
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
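    # Bitwise XOR of two non-negative integers, returned as a binary string with a "0b" prefix.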
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = max(len(lowercase ) , len(lowercase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
# fmt: off
_UpperCAmelCase : Optional[Any] = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_UpperCAmelCase : Union[str, Any] = {value: key for key, value in MORSE_CODE_DICT.items()}
def A ( lowercase ) -> str:
'''simple docstring'''
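    # Encode a plaintext message into Morse code; symbols are separated by single spaces.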
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A ( lowercase ) -> str:
'''simple docstring'''
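    # Decode a space-separated Morse code message back into plaintext.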
return "".join(REVERSE_DICT[char] for char in message.split() )
def A ( ) -> None:
'''simple docstring'''
UpperCamelCase = 'Morse code here!'
print(lowercase )
UpperCamelCase = encrypt(lowercase )
print(lowercase )
UpperCamelCase = decrypt(lowercase )
print(lowercase )
if __name__ == "__main__":
main()
| 701 |
import re
def A ( lowercase ) -> str:
'''simple docstring'''
if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowercase :
def __init__( self , A_ , A_=13 , A_=10 , A_=3 , A_=2 , A_=2 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_="divided_space_time" , A_=None , ) -> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = patch_size
UpperCamelCase = num_frames
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = attention_type
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = (num_frames) * self.num_patches_per_frame + 1
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
UpperCamelCase = self.num_labels
return config
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = TimesformerModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = TimesformerForVideoClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
# verify the logits shape
UpperCamelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__lowercase : Dict = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__lowercase : Optional[Any] = False
__lowercase : int = False
__lowercase : Any = False
__lowercase : Any = False
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = TimesformerModelTester(self )
UpperCamelCase = ConfigTester(
self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase ( self , A_ , A_ , A_=False ) -> Any:
"""simple docstring"""
UpperCamelCase = copy.deepcopy(A_ )
if return_labels:
if model_class in get_values(A_ ):
UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*A_ )
@slow
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TimesformerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
        if self.has_attentions:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
UpperCamelCase = self.model_tester.seq_length
UpperCamelCase = self.model_tester.num_frames
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
UpperCamelCase = len(A_ )
# Check attention is always last and order is fine
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + 1 , len(A_ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A_ ) , A_ )
UpperCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
UpperCamelCase = np.load(lowercase )
return list(lowercase )
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_video()
UpperCamelCase = image_processor(video[:8] , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
# verify the logits
UpperCamelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(A_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 702 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = (DDPMScheduler,)
def __UpperCamelCase ( self , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(A_ )
UpperCamelCase = prev_t.item()
self.assertEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(A_ )
with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A_ )
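# --- Editor's sketch (not part of the test file above): a minimal, hedged usage
# example of the custom-timestep path these tests exercise. Assumes a diffusers
# version whose DDPMScheduler supports `set_timesteps(timesteps=...)`.
from diffusers import DDPMScheduler
sketch_scheduler = DDPMScheduler(num_train_timesteps=1_000)
# Custom timesteps must be strictly descending and cannot be combined with
# `num_inference_steps`, matching the failure cases asserted above.
sketch_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(sketch_scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])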
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase : str = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
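# --- Editor's sketch: what the `_LazyModule` indirection above buys. Importing
# the package is cheap; the torch-backed symbols are resolved only on first
# attribute access. Assumes a transformers version that ships EnCodec.
import importlib
encodec_pkg = importlib.import_module('transformers.models.encodec')
print(type(encodec_pkg))               # the _LazyModule proxy, not a plain module
model_cls = encodec_pkg.EncodecModel   # first access triggers the real import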
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
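# --- Editor's sketch: the special-token layout implemented above, shown on a
# toy id sequence. Assumes network access to download `camembert-base`; the
# exact special-token ids depend on that vocabulary.
from transformers import CamembertTokenizerFast
camembert_tok = CamembertTokenizerFast.from_pretrained('camembert-base')
# Single sequence: <s> A </s>; pair of sequences: <s> A </s></s> B </s>
print(camembert_tok.build_inputs_with_special_tokens([10, 11], [12]))
# Token type ids are all zeros for CamemBERT, with or without a second sequence:
print(camembert_tok.create_token_type_ids_from_sequences([10, 11], [12]))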
| 3 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = BlipImageProcessor()
UpperCamelCase = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
UpperCamelCase = Blip2Processor(A_ , A_ )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self , **A_ ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).tokenizer
def __UpperCamelCase ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCamelCase = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Blip2Processor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
UpperCamelCase = Blip2Processor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = Blip2Processor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(A_ , return_tensors='np' )
UpperCamelCase = processor(images=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = Blip2Processor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=A_ )
UpperCamelCase = tokenizer(A_ , return_token_type_ids=A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = Blip2Processor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = Blip2Processor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(A_ )
UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = Blip2Processor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=A_ , images=A_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
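# --- Editor's sketch: the end-to-end behavior these tests check, using the
# same tiny test checkpoint. Text goes to the tokenizer, images to the image
# processor, and the merged dict is what a BLIP-2 model consumes.
import numpy as np
from PIL import Image
from transformers import Blip2Processor, BlipImageProcessor, GPT2Tokenizer
sketch_image_processor = BlipImageProcessor()
sketch_tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
sketch_processor = Blip2Processor(sketch_image_processor, sketch_tokenizer)
sketch_image = Image.fromarray(np.random.randint(255, size=(30, 40, 3), dtype=np.uint8))
sketch_inputs = sketch_processor(text='lower newer', images=sketch_image)
print(sorted(sketch_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']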
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
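# --- Editor's sketch: a hedged captioning example with the public classes
# exported above. Assumes torch and the `microsoft/git-base` checkpoint are
# available; the zero tensor is only a stand-in for a preprocessed image.
import torch
from transformers import GitForCausalLM, GitProcessor
git_processor = GitProcessor.from_pretrained('microsoft/git-base')
git_model = GitForCausalLM.from_pretrained('microsoft/git-base')
pixel_values = torch.zeros(1, 3, 224, 224)  # stand-in for a real image batch
generated_ids = git_model.generate(pixel_values=pixel_values, max_length=20)
print(git_processor.batch_decode(generated_ids, skip_special_tokens=True))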
| 3 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_UpperCAmelCase : str = logging.getLogger(__name__)
_UpperCAmelCase : List[str] = "pytorch_model.bin"
@dataclasses.dataclass
class lowercase :
__lowercase : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
__lowercase : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowercase :
__lowercase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
__lowercase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
__lowercase : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "A csv or a json file containing the validation data."} )
__lowercase : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The name of the task to train on."} , )
__lowercase : Optional[List[str]] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowercase :
__lowercase : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
__lowercase : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
__lowercase : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
__lowercase : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowercase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
__lowercase : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
__lowercase : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
__lowercase : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
__lowercase : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
__lowercase : Optional[int] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowercase : Optional[int] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Random seed for initialization."} , )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCamelCase = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCamelCase = int(eval_result * len(lowercase ) )
print(lowercase )
UpperCamelCase = dataset.sort('probability' , reverse=lowercase )
UpperCamelCase = dataset.select(range(lowercase ) )
UpperCamelCase = dataset.remove_columns(['label', 'probability'] )
UpperCamelCase = dataset.rename_column('prediction' , 'label' )
UpperCamelCase = dataset.map(lambda example : {"label": id2label[example["label"]]} )
UpperCamelCase = dataset.shuffle(seed=args.seed )
UpperCamelCase = os.path.join(lowercase , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowercase , index=lowercase )
else:
dataset.to_json(lowercase )
def A ( lowercase , lowercase , lowercase , lowercase , **lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCamelCase = STModelArguments(model_name_or_path=lowercase )
UpperCamelCase = STDataArguments(train_file=lowercase , infer_file=lowercase )
UpperCamelCase = STTrainingArguments(output_dir=lowercase )
UpperCamelCase = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowercase ).items():
setattr(lowercase , lowercase , lowercase )
for key, value in kwargs.items():
if hasattr(lowercase , lowercase ):
setattr(lowercase , lowercase , lowercase )
# Sanity checks
UpperCamelCase = {}
UpperCamelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCamelCase = args.train_file
UpperCamelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCamelCase = args.eval_file
for key in data_files:
UpperCamelCase = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
UpperCamelCase = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
UpperCamelCase = f'''{args.output_dir}/self-train_iter-{{}}'''.format
UpperCamelCase = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
accelerator.wait_for_everyone()
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = 0
UpperCamelCase = False
# Show the progress bar
UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCamelCase = data_dir_format(lowercase )
assert os.path.exists(lowercase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCamelCase = os.path.join(lowercase , 'stage-1' )
UpperCamelCase = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase , lowercase ):
arguments_dict.update({key: value} )
UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' , lowercase )
if os.path.exists(lowercase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , lowercase , lowercase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , lowercase )
finetune(**lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , lowercase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' )
UpperCamelCase = os.path.join(lowercase , 'stage-2' )
# Update arguments_dict
UpperCamelCase = model_path
UpperCamelCase = data_files['train']
UpperCamelCase = current_output_dir
UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' , lowercase )
if os.path.exists(lowercase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , lowercase , lowercase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , lowercase )
finetune(**lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , lowercase )
UpperCamelCase = iteration
UpperCamelCase = data_dir_format(iteration + 1 )
UpperCamelCase = AutoConfig.from_pretrained(os.path.join(lowercase , 'best-checkpoint' ) )
UpperCamelCase = config.id2label
UpperCamelCase = os.path.join(lowercase , 'eval_results_best-checkpoint.json' )
UpperCamelCase = os.path.join(lowercase , 'test_results_best-checkpoint.json' )
assert os.path.exists(lowercase )
with open(lowercase , 'r' ) as f:
UpperCamelCase = float(json.load(lowercase )[args.eval_metric] )
UpperCamelCase = os.path.join(lowercase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(lowercase )
# Loading the dataset from local csv or json files.
UpperCamelCase = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
UpperCamelCase = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(lowercase , exist_ok=lowercase )
shutil.copy(lowercase , os.path.join(lowercase , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowercase ):
shutil.copy(lowercase , os.path.join(lowercase , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
accelerator.wait_for_everyone()
UpperCamelCase = os.path.join(lowercase , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCamelCase = eval_result
if best_iteration is None:
UpperCamelCase = new_iteration
UpperCamelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCamelCase = new_iteration
UpperCamelCase = new_eval_result
UpperCamelCase = 0
else:
if new_eval_result == best_eval_result:
UpperCamelCase = new_iteration
UpperCamelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCamelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , lowercase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowercase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowercase , 'eval_results_best-iteration.json' ) , )
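# --- Editor's sketch: the confidence-filtering core of
# `create_pseudo_labeled_data` above, run on an in-memory dataset. The column
# names and the 0.9 threshold are illustrative, not taken from the script.
from datasets import Dataset
sketch_id2label = {0: 'neg', 1: 'pos'}
sketch_infer = Dataset.from_dict(
    {'sentence': ['good', 'bad', 'meh'], 'prediction': [1, 0, 1], 'probability': [0.97, 0.91, 0.55]}
)
confident = sketch_infer.filter(lambda ex: ex['probability'] > 0.9)
pseudo = confident.map(lambda ex: {'label': sketch_id2label[ex['prediction']]})
print(pseudo['label'])  # ['pos', 'neg']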
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
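# --- Editor's sketch: the dynamic axes the OnnxConfig above declares, which an
# exporter uses to mark batch and sequence dimensions as variable. Assumes the
# transformers module layout in which this config file lives.
from transformers import Data2VecTextConfig
from transformers.models.data2vec.configuration_data2vec_text import Data2VecTextOnnxConfig
onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task='default')
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])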
| 3 | 0 |
def A ( lowercase ) -> list[list[float]]:
'''simple docstring'''
UpperCamelCase = []
for data in source_data:
for i, el in enumerate(lowercase ):
if len(lowercase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(lowercase ) )
return data_lists
def A ( lowercase , lowercase ) -> list[list[float]]:
'''simple docstring'''
UpperCamelCase = []
for dlist, weight in zip(lowercase , lowercase ):
UpperCamelCase = min(lowercase )
UpperCamelCase = max(lowercase )
UpperCamelCase = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
UpperCamelCase = f'''Invalid weight of {weight:f} provided'''
raise ValueError(lowercase )
score_lists.append(lowercase )
return score_lists
def A ( lowercase ) -> list[float]:
'''simple docstring'''
UpperCamelCase = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(lowercase ):
UpperCamelCase = final_scores[j] + ele
return final_scores
def A ( lowercase , lowercase ) -> list[list[float]]:
'''simple docstring'''
UpperCamelCase = get_data(lowercase )
UpperCamelCase = calculate_each_score(lowercase , lowercase )
UpperCamelCase = generate_final_scores(lowercase )
# append scores to source data
for i, ele in enumerate(lowercase ):
source_data[i].append(lowercase )
return source_data
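# --- Editor's sketch: the min-max scoring rule above, worked on one column
# (the entry points in this dump are all obfuscated to `A`, so the arithmetic
# is redone inline). Weight 1 rewards high values; weight 0 inverts the score.
column = [20.0, 23.0, 22.0]
lo, hi = min(column), max(column)
weight_one = [(v - lo) / (hi - lo) for v in column]       # higher is better
weight_zero = [1 - (v - lo) / (hi - lo) for v in column]  # lower is better
print(weight_one)   # [0.0, 1.0, 0.666...]
print(weight_zero)  # [1.0, 0.0, 0.333...]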
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
UpperCamelCase = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase = tf.placeholder('float64' , [dim] )
UpperCamelCase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase = tf.placeholder('int32' )
UpperCamelCase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase = tf.placeholder('float' , [noofclusters] )
UpperCamelCase = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase = tf.global_variables_initializer()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
UpperCamelCase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
UpperCamelCase = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase = sess.run(lowercase )
UpperCamelCase = sess.run(lowercase )
return centroids, assignments
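# --- Editor's sketch: the routine above targets the TensorFlow 1.x graph API
# (under TF2 it would need `tf.compat.v1` with eager execution disabled). For
# comparison, a hedged NumPy version of the same expectation/maximization loop:
import numpy as np

def kmeans_numpy(vectors, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    vectors = np.asarray(vectors, dtype=float)
    # Pick k distinct input vectors as the initial centroids.
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid.
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its cluster.
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments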
| 3 | 0 |