from ..utils import DummyObject, requires_backends


# Dummy placeholders that stand in for the library's torch-backed classes when
# `torch` is not installed: any instantiation, attribute access, or
# `from_config` / `from_pretrained` call raises an informative ImportError via
# `requires_backends`.
#
# NOTE: the source repeated this identical class dozens of times (and the
# module-level stub function several times), but identifier obfuscation gave
# every repetition the same name, so each redefinition shadowed the previous
# one.  The duplicates are therefore collapsed into one representative of each.
# `_snake_case` and `lowerCamelCase_` are placeholder identifiers (the original
# names are not recoverable), and `from_config` / `from_pretrained` follow the
# usual dummy-object convention rather than being recovered from the source.
class _snake_case(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def lowerCamelCase_(*args, **kwargs):
    requires_backends(lowerCamelCase_, ["torch"])
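# --- Illustrative sketch (not from the source): how the DummyObject pattern
# behaves at runtime.  A minimal re-implementation of the metaclass and
# checker is inlined so the example is self-contained; the real ones live in
# the library's `utils` module, and `FakeTorchModel` is a hypothetical name.

class _MiniDummyObject(type):
    """Metaclass: attribute access on the class itself raises if the backend is missing."""

    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}.")


def _mini_requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the backends {backends}.")


class FakeTorchModel(metaclass=_MiniDummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _mini_requires_backends(self, ["torch"])


try:
    FakeTorchModel()  # raises at instantiation time, not at import time
except ImportError as err:
    print(err)  # -> FakeTorchModel requires the backends ['torch'].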
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
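# --- Illustrative sketch (not from the source): resolving one of the files
# mapped above through the Hugging Face Hub client rather than a raw URL.
# `hf_hub_download` is the standard hub API and returns a local cache path.
# (The proxy-looking URLs above are kept exactly as they appear in the source.)
from huggingface_hub import hf_hub_download

vocab_path = hf_hub_download(repo_id="allenai/led-base-16384", filename="vocab.json")
print(vocab_path)  # local cache path of the downloaded vocab file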
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode characters, so the
    BPE vocabulary never has to contain whitespace or control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
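# --- Illustrative sketch (not from the source): what bytes_to_unicode maps.
# Printable bytes map to themselves; "gap" bytes such as the space are
# remapped to printable stand-ins so every byte string becomes visible text.
byte_encoder = bytes_to_unicode()
print(byte_encoder[ord("A")])  # 'A'  (printable bytes are unchanged)
print(byte_encoder[ord(" ")])  # 'Ġ'  (space maps to a visible stand-in)
print(len(byte_encoder))       # 256  (every byte value gets a character)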
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
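# --- Illustrative sketch (not from the source): get_pairs on a symbol tuple.
print(get_pairs(("l", "o", "w", "e", "r")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}  (set order may vary)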
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED, derived from the BART tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply byte-pair merges to a single pre-tokenized token, with caching."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
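# --- Illustrative sketch (not from the source): the `global_attention_mask`
# padding rule from `_pad` above, demonstrated on plain lists.  `-1` marks
# padded positions, because `0` already means "local attention" for LED.
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    """Hypothetical standalone helper mirroring LEDTokenizer._pad's rule."""
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask


print(pad_global_attention_mask([1, 0, 0], 5))          # [1, 0, 0, -1, -1]
print(pad_global_attention_mask([1, 0, 0], 5, "left"))  # [-1, -1, 1, 0, 0]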
import random
def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses."""
    # write num - 1 as s * 2**t with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
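# --- Illustrative sketch (not from the source): sanity checks for the
# Miller-Rabin test above.  The test is probabilistic: primes always pass,
# composites fail with probability at least 1 - 4**-5 per call.
print(rabin_miller(97))   # True: 97 is prime
print(rabin_miller(561))  # False (with overwhelming probability): 561 = 3 * 11 * 17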
def is_prime_low_num(num: int) -> bool:
    """Trial division against primes below 1000, then fall back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
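# --- Illustrative sketch (not from the source): the two-stage check in action.
print(is_prime_low_num(997))     # True: found directly in the low-primes table
print(is_prime_low_num(1001))    # False: eliminated by trial division (1001 = 7 * 11 * 13)
print(is_prime_low_num(104729))  # True: survives trial division, confirmed by rabin_miller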
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2**keysize)
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
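# --- Illustrative sketch (not from the source): generation cost intuition.
# By the prime number theorem roughly one in ln(2**keysize) ~ 0.69 * keysize
# random candidates is prime, so a 1024-bit key tests ~700 candidates on
# average; a small keysize finishes instantly and is handy for demos
# (but is NOT crypto-grade).
demo_prime = generate_large_prime(keysize=32)
print(demo_prime, demo_prime.bit_length())  # a random 32-bit prime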
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    # NOTE: the class and test-method names in this file were lost to
    # identifier obfuscation; the descriptive names used here are
    # reconstructions, not recovered originals.

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    # NOTE: reconstructed class name (the original was lost to obfuscation).

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0); the all-zero expected
        # slice indicates the default safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
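# --- Illustrative sketch (not from the source): the slice-regression pattern
# used throughout the tests above, extracted into a hypothetical helper.
# Comparing one 3x3 corner patch of one channel against stored reference
# values keeps the golden data tiny while still catching numerical drift.
# (`np` is already imported at the top of this test module.)
def assert_slice_close(image, expected_slice, tol=1e-2):
    image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch, last channel
    assert np.abs(image_slice.flatten() - np.asarray(expected_slice)).max() < tol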
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    # NOTE: class name reconstructed from the LeViT-style (256 / 224) resize
    # rule used below; treat it as a best-effort identification rather than a
    # recovered original.

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self: Optional[int] ,image: np.ndarray ,size: Dict[str, int] ,resample: PILImageResampling = PILImageResampling.BICUBIC ,data_format: Optional[Union[str, ChannelDimension]] = None ,**kwargs: Optional[Any] ,) -> np.ndarray:
        size_dict = get_size_dict(size ,default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["""shortest_edge"""] )
            output_size = get_resize_output_image_size(image ,size=shortest_edge ,default_to_square=False )
            size_dict = {"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
        return resize(
            image ,size=(size_dict["""height"""], size_dict["""width"""]) ,resample=resample ,data_format=data_format ,**kwargs )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: int ,) -> np.ndarray:
UpperCAmelCase_ : str = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: str ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Union[int, float] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: int ,) -> np.ndarray:
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: str ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: ImageInput ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[Dict[str, int]] = None ,lowerCamelCase_: PILImageResampling = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[Dict[str, int]] = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[float] = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[Union[float, Iterable[float]]] = None ,lowerCamelCase_: Optional[Union[float, Iterable[float]]] = None ,lowerCamelCase_: Optional[TensorType] = None ,lowerCamelCase_: ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase_: Dict ,) -> BatchFeature:
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : str = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Any = size if size is not None else self.size
UpperCAmelCase_ : Tuple = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
UpperCAmelCase_ : Optional[int] = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase_ : int = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : List[str] = [self.resize(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : str = [self.center_crop(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Optional[Any] = [self.rescale(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : List[Any] = [self.normalize(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
UpperCAmelCase_ : Any = [to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
UpperCAmelCase_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
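# A minimal illustrative sketch of the "shortest_edge" arithmetic used in resize()
# above: the requested edge is scaled by 256/224 before resizing so that a later
# 224x224 center crop keeps the standard ImageNet resize/crop ratio. The helper
# name below (`_demo_shortest_edge`) is hypothetical, added only for illustration.
def _demo_shortest_edge(shortest_edge: int = 224) -> int:
    # mirrors `int((256 / 224) * size["shortest_edge"])`
    return int((256 / 224) * shortest_edge)

assert _demo_shortest_edge(224) == 256  # resize shorter side to 256, then crop to 224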
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
    def _prepare_for_class( self: Dict ,inputs_dict: Tuple ,model_class: Tuple ,return_labels: int=False ) -> Union[str, Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[str] = MobileBertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor( tok_lst : Union[str, Any] ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ] ,device=torch_device ,)
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in an
        # absolute difference of ~1, so measuring closeness by subtraction is unreliable here.
        # Instead, divide the expected result by the actual result to obtain a ratio of ~1, then check that the
        # ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
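# A minimal illustrative sketch of why the ratio check above is preferred over
# subtraction for MobileBERT's very large activations; the `_demo`-prefixed
# values are hypothetical numbers chosen to match the magnitudes in the test.
_expected_demo = 2.4736526e07
_actual_demo = _expected_demo * (1 + 1e-9)  # a 0.0000001% relative error
assert abs(_expected_demo - _actual_demo) > 0.01  # absolute diff is already ~0.025
assert 1 - TOLERANCE < _expected_demo / _actual_demo < 1 + TOLERANCE  # ratio test passes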
| 345 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Any = PegasusTokenizer
A__ : str = PegasusTokenizerFast
A__ : Any = True
A__ : int = True
def A__ ( self: str ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self: Optional[Any] ) -> Optional[int]:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def A__ ( self: List[str] ,**lowerCamelCase_: str ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: List[str] ) -> Union[str, Any]:
return ("This is a test", "This is a test")
def A__ ( self: Any ) -> Optional[Any]:
UpperCAmelCase_ : int = """</s>"""
UpperCAmelCase_ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"""<pad>""" )
        self.assertEqual(vocab_keys[1] ,"""</s>""" )
        self.assertEqual(vocab_keys[-1] ,"""v""" )
        self.assertEqual(len(vocab_keys ) ,1103 )
def A__ ( self: Dict ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def A__ ( self: Dict ) -> List[str]:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] ,return_tensors=None ,add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] ,return_tensors=None ,add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids ,rust_ids )
def A__ ( self: Any ) -> Any:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] ,return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result ,ids )
def A__ ( self: Dict ) -> Optional[int]:
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase_ : Any = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase_ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase_ : Optional[Any] = tokenizer([raw_input_str] ,return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : Any = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase_ : Dict = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase_ : List[str] = self._large_tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : Tuple = self._large_tokenizer(
text_target=lowerCamelCase_ ,max_length=5 ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
@slow
def A__ ( self: Tuple ) -> Union[str, Any]:
# fmt: off
UpperCAmelCase_ : Optional[Any] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ ,model_name="""google/bigbird-pegasus-large-arxiv""" ,revision="""ba85d0851d708441f91440d509690f1ab6353415""" ,)
@require_sentencepiece
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = PegasusTokenizer
A__ : int = PegasusTokenizerFast
A__ : Any = True
A__ : Optional[Any] = True
def A__ ( self: str ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB ,offset=0 ,mask_token_sent=None ,mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self: Optional[Any] ) -> Tuple:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def A__ ( self: Tuple ,**lowerCamelCase_: Dict ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int] ) -> int:
return ("This is a test", "This is a test")
def A__ ( self: List[str] ) -> Any:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] ,return_tensors=None ,add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] ,return_tensors=None ,add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids ,rust_ids )
@require_torch
def A__ ( self: Union[str, Any] ) -> Any:
UpperCAmelCase_ : str = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase_ : List[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase_ : Optional[Any] = self._large_tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : List[str] = self._large_tokenizer(
text_target=lowerCamelCase_ ,max_length=5 ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
def A__ ( self: List[str] ) -> str:
        raw_input_str = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            token_ids ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] ,)
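# A minimal illustrative sketch of the id layout the assertions above imply:
# PegasusTokenizer reserves the first `offset` ids for special tokens (<pad>,
# </s>, the <mask_*>/<unk_*> tokens) and shifts the underlying SentencePiece
# ids up by that amount, so vocab_size 96103 = 96000 pieces + 103 offset.
# The helper name below is hypothetical, added only for illustration.
def _demo_hf_id_to_sp_id(hf_id: int, offset: int = 103) -> int:
    # map an HF Pegasus id (>= offset) back to its SentencePiece id
    return hf_id - offset

assert _demo_hf_id_to_sp_id(105) == 2  # e.g. the large tokenizer's unk_token_id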
| 345 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        processor.save_pretrained(
            self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string ,voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname ,"""file.npz""" )
        np.savez(tmpfilename ,**voice_preset )
        inputs = processor(text=self.input_string ,voice_preset=tmpfilename )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys( config : DeiTConfig , base_model : bool = False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict : dict , config : DeiTConfig , base_model : bool = False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
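# A minimal toy-sized sketch of the slicing performed above: timm fuses the
# attention projections into one (3 * hidden_size, hidden_size) matrix, and
# read_in_q_k_v() carves it into equal query / key / value blocks, in that
# order. The `_demo`-prefixed names are hypothetical, added for illustration.
_demo_hidden = 4
_demo_fused = torch.arange(3 * _demo_hidden * _demo_hidden).reshape(3 * _demo_hidden, _demo_hidden)
_demo_q = _demo_fused[: _demo_hidden, :]
_demo_k = _demo_fused[_demo_hidden : 2 * _demo_hidden, :]
_demo_v = _demo_fused[-_demo_hidden :, :]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_hidden, _demo_hidden)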
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small""" ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    size = int((256 / 224) * config.image_size )
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids ,max_new_tokens=10 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,greedy_text )
def A__ ( self: Dict ) -> Optional[Any]:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        streamer_text = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text ,greedy_text )
def A__ ( self: List[Any] ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer ,skip_prompt=True )
            model.generate(input_ids ,max_new_tokens=10 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,new_greedy_text )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer ,timeout=0.0_0_1 )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = """"""
            for new_text in streamer:
                streamer_text += new_text
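# A minimal sketch of the consumer pattern the tests above exercise: generate()
# runs in a worker thread while the main thread iterates over the streamer, and
# iteration raises queue.Empty if no new text arrives within `timeout` seconds.
# The model/tokenizer arguments and the helper name are placeholders.
def _demo_stream_generation(model, tokenizer, input_ids):
    streamer = TextIteratorStreamer(tokenizer, timeout=30.0)
    generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    text = ""
    for chunk in streamer:  # blocks for up to `timeout` seconds per chunk
        text += chunk
    thread.join()
    return text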
| 345 | 1 |
from __future__ import annotations
def average( nums : list ):
    '''
    Return the mean of the numbers in ``nums``.
    >>> average([1, 2, 3])
    2.0
    >>> average([2, 4, 6, 8])
    5.0
    '''
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self: Optional[int] ) -> int:
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
        return model
@property
    def dummy_vq_model( self: Tuple ) -> Optional[Any]:
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
        return model
@property
    def dummy_text_encoder( self: Tuple ) -> Any:
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
    def A__ ( self: str ) -> Optional[Any]:
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet ,vqvae=vae ,scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=2 ,output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def A__ ( self: Optional[int] ) -> Optional[Any]:
        ldm = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=5 ,output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
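    # Worked example for the property above (a sketch, not part of the original
    # file): the feature extractor downsamples by the product of the conv
    # strides, so with the default conv_stride=(5, 2, 2, 2, 2, 2, 2)
    #   math.prod((5, 2, 2, 2, 2, 2, 2)) == 320
    # i.e. one output frame per 320 waveform samples (~50 frames/s at 16 kHz).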
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of the DAG, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # count incoming edges for every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # start from the vertices with no incoming edges
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
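# Expected output for the DAG above (one valid ordering):
#   [0, 1, 2, 3, 4, 5]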
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from those of "@" and "#"
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    # the word embedding matrix now has two extra rows for <ent> and <ent2>
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
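# Example invocation (script name and all paths are hypothetical placeholders):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke_base/pytorch_model.bin \
#       --metadata_path ./mluke_base/metadata.json \
#       --entity_vocab_path ./mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke_base_converted \
#       --model_size base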
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
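    # Quick sanity check for the derived hidden size (a sketch, assuming the
    # defaults above): each stage doubles the channel dimension, so
    #   int(96 * 2 ** (len([2, 2, 6, 2]) - 1)) == 768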
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
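    # Illustrative effect of the lazy structure above (import path assumed):
    # importing the package itself stays cheap, and the heavy torch-backed
    # module is only imported on first attribute access, e.g.
    #   from transformers.models.mctct import MCTCTConfig  # config loads eagerly
    #   MCTCTModel  # accessing a modeling symbol triggers the real torch import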
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
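    # Quick illustration of the Trie behaviour exercised above (assumes the
    # same transformers.tokenization_utils.Trie):
    #   trie = Trie(); trie.add("[CLS]")
    #   trie.split("[CLS] This") == ["[CLS]", " This"]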
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
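# Hypothetical usage sketch (checkpoint name assumed, not part of this file):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])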
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
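# Behaviour sketch for the placeholder classes above: instantiating any of
# them, or calling either classmethod (in the real dummy_flax_objects module
# these are typically from_config and from_pretrained), raises an ImportError
# from requires_backends when flax is missing, pointing the user at the
# dependency instead of failing with an opaque NameError.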
def binary_or(a: int, b: int) -> str:
    """Return the binary OR of two non-negative integers as a '0b...' string.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
import random


def fisher_yates_shuffle(data: list) -> list:
    """Shuffle ``data`` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
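# Note: each call above draws fresh randomness; for a reproducible demo you
# can seed the generator first, e.g.
#   random.seed(0)
#   print(fisher_yates_shuffle(list(range(8))))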
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
def A__ ( self: int ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase_ : Union[str, Any] = {0: """batch"""}
UpperCAmelCase_ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase_ : int = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.num_layers
for i in range(lowerCamelCase_ ):
UpperCAmelCase_ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase_ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase_ : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def A__ ( self: Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : Dict = super().outputs
else:
UpperCAmelCase_ : Union[str, Any] = super(lowerCamelCase_ ,self ).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase_ : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A__ ( self: List[Any] ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Generate decoder inputs
UpperCAmelCase_ : Union[str, Any] = seq_length if not self.use_past else 1
UpperCAmelCase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ : Tuple = dict(**lowerCamelCase_ ,**lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ : int = common_inputs["""input_ids"""].shape
UpperCAmelCase_ : Union[str, Any] = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.num_attention_heads
UpperCAmelCase_ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ : List[str] = decoder_seq_length + 3
UpperCAmelCase_ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ : Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCamelCase_ ,lowerCamelCase_ )] ,dim=1 )
UpperCAmelCase_ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.num_layers
UpperCAmelCase_ : Dict = min(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = max(lowerCamelCase_ ,lowerCamelCase_ ) - min_num_layers
UpperCAmelCase_ : str = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
UpperCAmelCase_ : Any = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCamelCase_ ,lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def A__ ( self: Optional[int] ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase_ : Union[str, Any] = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.num_attention_heads
UpperCAmelCase_ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ : Optional[Any] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase_ : Union[str, Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCamelCase_ ,lowerCamelCase_ ,dtype=lowerCamelCase_ )] ,dim=1 )
UpperCAmelCase_ : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def A__ ( self: str ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : Union[str, Any] = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase_ : List[str] = dict(tokenizer(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ) )
return common_inputs
def A__ ( self: Optional[Any] ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
elif self.task == "causal-lm":
UpperCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
else:
UpperCAmelCase_ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
return common_inputs
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : List[str] = super()._flatten_past_key_values_(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
else:
UpperCAmelCase_ : Optional[Any] = super(lowerCamelCase_ ,self )._flatten_past_key_values_(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
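    # Hypothetical end-to-end sketch (checkpoint name assumed): building dummy
    # inputs for ONNX export with the config above.
    #   from transformers import AutoTokenizer
    #   onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="seq2seq-lm")
    #   tok = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    #   dummy = onnx_config.generate_dummy_inputs(tok, batch_size=2, seq_length=8)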
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
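# Note on the blocks above: Flax/JAX tensors here use NHWC layout, so the UNet
# skip connections are concatenated along the channel axis with axis=-1,
# whereas the equivalent PyTorch blocks concatenate along dim=1 (the channel
# axis of NCHW tensors).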
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[str] = "encodec"
def __init__( self: List[str] ,lowerCamelCase_: Union[str, Any]=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] ,lowerCamelCase_: Tuple=24000 ,lowerCamelCase_: List[str]=1 ,lowerCamelCase_: Any=False ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: List[Any]=32 ,lowerCamelCase_: Optional[int]=1 ,lowerCamelCase_: str=[8, 5, 4, 2] ,lowerCamelCase_: Optional[int]="weight_norm" ,lowerCamelCase_: Tuple=7 ,lowerCamelCase_: Any=7 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: int=2 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Optional[Any]="reflect" ,lowerCamelCase_: Optional[Any]=2 ,lowerCamelCase_: Optional[Any]=2 ,lowerCamelCase_: List[Any]=1.0 ,lowerCamelCase_: Optional[int]=1024 ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: Optional[int]=True ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
UpperCAmelCase_ : Any = target_bandwidths
UpperCAmelCase_ : List[str] = sampling_rate
UpperCAmelCase_ : Optional[Any] = audio_channels
UpperCAmelCase_ : Tuple = normalize
UpperCAmelCase_ : str = chunk_length_s
UpperCAmelCase_ : int = overlap
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : List[Any] = num_filters
UpperCAmelCase_ : Tuple = num_residual_layers
UpperCAmelCase_ : str = upsampling_ratios
UpperCAmelCase_ : List[Any] = norm_type
UpperCAmelCase_ : int = kernel_size
UpperCAmelCase_ : Optional[int] = last_kernel_size
UpperCAmelCase_ : Dict = residual_kernel_size
UpperCAmelCase_ : str = dilation_growth_rate
UpperCAmelCase_ : Tuple = use_causal_conv
UpperCAmelCase_ : Any = pad_mode
UpperCAmelCase_ : int = compress
UpperCAmelCase_ : Optional[int] = num_lstm_layers
UpperCAmelCase_ : str = trim_right_ratio
UpperCAmelCase_ : Union[str, Any] = codebook_size
UpperCAmelCase_ : List[Any] = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase_ : List[str] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**lowerCamelCase_ )
@property
def A__ ( self: List[Any] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A__ ( self: Tuple ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : Dict = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def A__ ( self: Any ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
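# Hypothetical standalone check of the derived properties above (chunk_length_s and
# overlap default to None in this config; the values below are assumptions): chunk_length,
# chunk_stride and frame_rate follow from sampling_rate and the upsampling ratios.
import math
import numpy as np
sampling_rate, chunk_length_s, overlap = 24000, 1.0, 0.01
upsampling_ratios = [8, 5, 4, 2]
chunk_length = int(chunk_length_s * sampling_rate)  # samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # samples between chunk starts
hop_length = np.prod(upsampling_ratios)  # encoder downsampling factor: 320
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second at 24 kHz
assert (chunk_length, chunk_stride, frame_rate) == (24000, 23760, 75)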
| 345 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
        # calculate the feature map for every kernel and save it as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
        # expand each data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # expand three-dimensional data into a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
        # expand a matrix into a one-dimensional list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
    def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=True ) -> Optional[int]:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error for this single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model predict
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution and pooling steps so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
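# A minimal standalone sketch (assumed square maps; not part of the class above) of the
# average-pooling step implemented in the pooling method: the feature map is scanned in
# non-overlapping size_pooling x size_pooling windows and each window is averaged.
import numpy as np
def average_pool(feature_map, size_pooling):
    size_map = feature_map.shape[0]
    pooled = []
    for i in range(0, size_map, size_pooling):
        for j in range(0, size_map, size_pooling):
            pooled.append(np.average(feature_map[i : i + size_pooling, j : j + size_pooling]))
    return np.asarray(pooled).reshape(size_map // size_pooling, size_map // size_pooling)
assert (average_pool(np.arange(16.0).reshape(4, 4), 2) == [[2.5, 4.5], [10.5, 12.5]]).all()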
if __name__ == "__main__":
pass
| 345 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase_ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Tuple = "esm"
def __init__( self: List[Any] ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: Dict=None ,lowerCamelCase_: Tuple=768 ,lowerCamelCase_: Tuple=12 ,lowerCamelCase_: Optional[int]=12 ,lowerCamelCase_: Optional[int]=3072 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Any=1026 ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: List[str]=1e-12 ,lowerCamelCase_: Optional[int]="absolute" ,lowerCamelCase_: Any=True ,lowerCamelCase_: int=None ,lowerCamelCase_: List[str]=False ,lowerCamelCase_: str=False ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: int=None ,**lowerCamelCase_: int ,) -> Tuple:
super().__init__(pad_token_id=lowerCamelCase_ ,mask_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : int = position_embedding_type
UpperCAmelCase_ : Optional[Any] = use_cache
UpperCAmelCase_ : int = emb_layer_norm_before
UpperCAmelCase_ : List[str] = token_dropout
UpperCAmelCase_ : Optional[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
UpperCAmelCase_ : int = EsmFoldConfig()
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = EsmFoldConfig(**lowerCamelCase_ )
UpperCAmelCase_ : str = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
UpperCAmelCase_ : str = get_default_vocab_list()
else:
UpperCAmelCase_ : Union[str, Any] = vocab_list
else:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : str = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"""use_esm_attn_map""" ,lowerCamelCase_ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def A__ ( self: int ) -> str:
UpperCAmelCase_ : Tuple = super().to_dict()
if isinstance(self.esmfold_config ,lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = None
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : float = 0
A__ : bool = True
A__ : bool = False
A__ : int = 128
A__ : "TrunkConfig" = None
def A__ ( self: List[Any] ) -> Union[str, Any]:
if self.trunk is None:
UpperCAmelCase_ : Union[str, Any] = TrunkConfig()
elif isinstance(self.trunk ,lowerCamelCase_ ):
UpperCAmelCase_ : Dict = TrunkConfig(**self.trunk )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = asdict(self )
UpperCAmelCase_ : str = self.trunk.to_dict()
return output
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = 48
A__ : int = 1_024
A__ : int = 128
A__ : int = 32
A__ : int = 32
A__ : int = 32
A__ : float = 0
A__ : float = 0
A__ : bool = False
A__ : int = 4
A__ : Optional[int] = 128
A__ : "StructureModuleConfig" = None
def A__ ( self: int ) -> Optional[Any]:
if self.structure_module is None:
UpperCAmelCase_ : str = StructureModuleConfig()
elif isinstance(self.structure_module ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
UpperCAmelCase_ : int = self.sequence_state_dim // self.sequence_head_width
UpperCAmelCase_ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def A__ ( self: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = asdict(self )
UpperCAmelCase_ : Tuple = self.structure_module.to_dict()
return output
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = 384
A__ : int = 128
A__ : int = 16
A__ : int = 128
A__ : int = 12
A__ : int = 4
A__ : int = 8
A__ : float = 0.1
A__ : int = 8
A__ : int = 1
A__ : int = 2
A__ : int = 7
A__ : int = 10
A__ : float = 1E-8
A__ : float = 1E5
def A__ ( self: Dict ) -> Tuple:
return asdict(self )
def lowerCamelCase_ ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
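# Hypothetical standalone illustration (using the dataclass defaults above) of the
# consistency rules TrunkConfig enforces: each state dimension must factor exactly into
# num_heads * head_width, and pairwise_state_dim must additionally be even.
sequence_state_dim, sequence_head_width = 1024, 32
pairwise_state_dim, pairwise_head_width = 128, 32
sequence_num_heads = sequence_state_dim // sequence_head_width  # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width  # 4 heads
assert sequence_state_dim == sequence_num_heads * sequence_head_width
assert pairwise_state_dim == pairwise_num_heads * pairwise_head_width
assert pairwise_state_dim % 2 == 0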
| 345 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
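# A small standalone check mirroring the fixture above: the toy vocabulary maps the
# expected BPE tokens (with "@@" continuation markers) to the ids asserted in the test.
vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
token_to_id = dict(zip(vocab, range(len(vocab))))
tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() + ["<unk>"]
assert [token_to_id[t] for t in tokens] == [0, 1, 2, 4, 5, 1, 0, 3, 6]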
| 345 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
UpperCAmelCase_ : Dict = Dataset.from_dict(_a )
return dataset
class _snake_case ( __snake_case ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : str = get_dataset()
UpperCAmelCase_ : List[Any] = make_duplicate_clusters(lowerCamelCase_ ,0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) ,2 )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = get_dataset()
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = deduplicate_dataset(lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) ,2 )
print(lowerCamelCase_ )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] ,2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] ,lowerCamelCase_ )
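# Plain-Python sketch (no datasets dependency; MinHash only approximates this) of why
# the first two toy rows above cluster: their token sets are identical, so their Jaccard
# similarity is 1.0, well above the 0.85 threshold used in the test.
def jaccard(a, b):
    set_a, set_b = set(a.split()), set(b.split())
    return len(set_a & set_b) / len(set_a | set_b)
assert jaccard("a " * 20, "a " * 30) == 1.0  # near-duplicate pair
assert jaccard("a " * 20, "b " * 7) == 0.0  # unrelated document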
| 345 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
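# Toy re-implementation (an illustrative assumption, not the transformers mechanism
# verbatim) of the attribute_map declared above: legacy names such as "dropout" and
# "num_classes" resolve transparently to classifier_dropout and num_labels.
class _AttributeMapDemo:
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(self):
        self.classifier_dropout = 0.1
        self.num_labels = 2
    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
demo = _AttributeMapDemo()
assert demo.dropout == 0.1 and demo.num_classes == 2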
| 345 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase_ ( _a : Dict , _a : Optional[int]=0.9_9_9 , _a : Union[str, Any]="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_a : Union[str, Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_a : Union[str, Any] ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
UpperCAmelCase_ : List[Any] = []
for i in range(_a ):
UpperCAmelCase_ : str = i / num_diffusion_timesteps
UpperCAmelCase_ : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_a ) / alpha_bar_fn(_a ) , _a ) )
return torch.tensor(_a , dtype=torch.floataa )
class _snake_case ( __snake_case , __snake_case ):
'''simple docstring'''
A__ : str = [e.name for e in KarrasDiffusionSchedulers]
A__ : Optional[Any] = 2
@register_to_config
def __init__( self: str ,lowerCamelCase_: int = 1000 ,lowerCamelCase_: float = 0.0_0_0_8_5 ,lowerCamelCase_: float = 0.0_1_2 ,lowerCamelCase_: str = "linear" ,lowerCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ,lowerCamelCase_: str = "epsilon" ,lowerCamelCase_: Optional[bool] = False ,lowerCamelCase_: Optional[bool] = False ,lowerCamelCase_: float = 1.0 ,lowerCamelCase_: str = "linspace" ,lowerCamelCase_: int = 0 ,) -> str:
if trained_betas is not None:
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ : Tuple = torch.linspace(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ : int = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowerCamelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ : Any = betas_for_alpha_bar(lowerCamelCase_ ,alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
UpperCAmelCase_ : Optional[Any] = betas_for_alpha_bar(lowerCamelCase_ ,alpha_transform_type="""exp""" )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
UpperCAmelCase_ : Union[str, Any] = 1.0 - self.betas
UpperCAmelCase_ : List[str] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : str = use_karras_sigmas
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=None ) -> List[Any]:
if schedule_timesteps is None:
UpperCAmelCase_ : List[Any] = self.timesteps
UpperCAmelCase_ : Optional[int] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ : Dict = 1 if len(lowerCamelCase_ ) > 1 else 0
else:
UpperCAmelCase_ : Optional[int] = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
UpperCAmelCase_ : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A__ ( self: Tuple ) -> Optional[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A__ ( self: Optional[Any] ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
UpperCAmelCase_ : int = self.index_for_timestep(lowerCamelCase_ )
UpperCAmelCase_ : str = self.sigmas[step_index]
UpperCAmelCase_ : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A__ ( self: List[str] ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, torch.device] = None ,lowerCamelCase_: Optional[int] = None ,) -> Optional[Any]:
UpperCAmelCase_ : str = num_inference_steps
UpperCAmelCase_ : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ : Dict = np.linspace(0 ,num_train_timesteps - 1 ,lowerCamelCase_ ,dtype=lowerCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : str = (np.arange(0 ,lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : List[Any] = (np.arange(lowerCamelCase_ ,0 ,-step_ratio )).round().copy().astype(lowerCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
UpperCAmelCase_ : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ : List[str] = np.log(lowerCamelCase_ )
UpperCAmelCase_ : Dict = np.interp(lowerCamelCase_ ,np.arange(0 ,len(lowerCamelCase_ ) ) ,lowerCamelCase_ )
if self.config.use_karras_sigmas:
UpperCAmelCase_ : Union[str, Any] = self._convert_to_karras(in_sigmas=lowerCamelCase_ ,num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ : List[Any] = np.array([self._sigma_to_t(lowerCamelCase_ ,lowerCamelCase_ ) for sigma in sigmas] )
UpperCAmelCase_ : List[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ : int = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ : Tuple = torch.from_numpy(lowerCamelCase_ )
UpperCAmelCase_ : int = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
UpperCAmelCase_ : str = timesteps.to(lowerCamelCase_ ,dtype=torch.floataa )
else:
UpperCAmelCase_ : Tuple = timesteps.to(device=lowerCamelCase_ )
# empty dt and derivative
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Any = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ : Dict = defaultdict(lowerCamelCase_ )
def A__ ( self: List[Any] ,lowerCamelCase_: Dict ,lowerCamelCase_: str ) -> Tuple:
# get log sigma
UpperCAmelCase_ : Any = np.log(lowerCamelCase_ )
# get distribution
UpperCAmelCase_ : Optional[Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ : Union[str, Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ : List[str] = low_idx + 1
UpperCAmelCase_ : List[Any] = log_sigmas[low_idx]
UpperCAmelCase_ : List[str] = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ : int = (low - log_sigma) / (low - high)
UpperCAmelCase_ : List[str] = np.clip(lowerCamelCase_ ,0 ,1 )
# transform interpolation to time range
UpperCAmelCase_ : Union[str, Any] = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ : Dict = t.reshape(sigma.shape )
return t
def A__ ( self: str ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: Any ) -> torch.FloatTensor:
UpperCAmelCase_ : float = in_sigmas[-1].item()
UpperCAmelCase_ : float = in_sigmas[0].item()
UpperCAmelCase_ : int = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ : List[Any] = np.linspace(0 ,1 ,lowerCamelCase_ )
UpperCAmelCase_ : int = sigma_min ** (1 / rho)
UpperCAmelCase_ : Dict = sigma_max ** (1 / rho)
UpperCAmelCase_ : int = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def A__ ( self: int ) -> Tuple:
return self.dt is None
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase_: Union[float, torch.FloatTensor] ,lowerCamelCase_: Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase_: bool = True ,) -> Union[SchedulerOutput, Tuple]:
UpperCAmelCase_ : int = self.index_for_timestep(lowerCamelCase_ )
# advance index counter by 1
UpperCAmelCase_ : Optional[Any] = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ : str = self.sigmas[step_index]
UpperCAmelCase_ : str = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ : Optional[int] = self.sigmas[step_index - 1]
UpperCAmelCase_ : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ : Optional[int] = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ : Any = model_output
else:
raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
UpperCAmelCase_ : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ : Optional[Any] = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ : Optional[Any] = derivative
UpperCAmelCase_ : Tuple = dt
UpperCAmelCase_ : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ : List[str] = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ : Dict = self.dt
UpperCAmelCase_ : Tuple = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ : Optional[int] = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ):
# mps does not support float64
UpperCAmelCase_ : Any = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
UpperCAmelCase_ : List[str] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
UpperCAmelCase_ : Optional[int] = self.timesteps.to(original_samples.device )
UpperCAmelCase_ : Union[str, Any] = timesteps.to(original_samples.device )
UpperCAmelCase_ : str = [self.index_for_timestep(lowerCamelCase_ ,lowerCamelCase_ ) for t in timesteps]
UpperCAmelCase_ : int = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ : Optional[Any] = sigma.unsqueeze(-1 )
UpperCAmelCase_ : str = original_samples + noise * sigma
return noisy_samples
def __len__( self: str ) -> Union[str, Any]:
return self.config.num_train_timesteps
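# Standalone sketch (sigma_min and sigma_max are assumed values) of the rho=7 Karras
# schedule computed in _convert_to_karras above: sigmas are interpolated linearly in
# sigma**(1/rho) space, which concentrates sampling steps near sigma_min.
import numpy as np
sigma_min, sigma_max, rho, num_steps = 0.03, 14.6, 7.0, 10
ramp = np.linspace(0, 1, num_steps)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
karras_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
assert np.isclose(karras_sigmas[0], sigma_max) and np.isclose(karras_sigmas[-1], sigma_min)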
| 345 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = text.split(_a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )]
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
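# Hypothetical follow-up helper (never invoked above; the question-encoder name is an
# assumption for illustration): reload the saved dataset and Faiss index, embed a
# question with the matching DPR question encoder, and retrieve the closest passages.
def _query_index_example(passages_path: str, index_path: str, question: str):
    from datasets import load_from_disk
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", index_path)
    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy()
    return dataset.get_nearest_examples("embeddings", question_emb, k=2)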
| 345 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCamelCase_ = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[Any] = "albert"
def __init__( self: Optional[int] ,lowerCamelCase_: Optional[int]=30000 ,lowerCamelCase_: Optional[int]=128 ,lowerCamelCase_: List[str]=4096 ,lowerCamelCase_: int=12 ,lowerCamelCase_: Optional[Any]=1 ,lowerCamelCase_: Dict=64 ,lowerCamelCase_: List[Any]=16384 ,lowerCamelCase_: int=1 ,lowerCamelCase_: Union[str, Any]="gelu_new" ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: Any=0 ,lowerCamelCase_: int=512 ,lowerCamelCase_: Any=2 ,lowerCamelCase_: str=0.0_2 ,lowerCamelCase_: List[str]=1e-12 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: str="absolute" ,lowerCamelCase_: str=0 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: Optional[int]=3 ,**lowerCamelCase_: Optional[Any] ,) -> List[str]:
super().__init__(pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : int = embedding_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_hidden_groups
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = inner_group_num
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Tuple = type_vocab_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : str = classifier_dropout_prob
UpperCAmelCase_ : Union[str, Any] = position_embedding_type
class _snake_case ( __snake_case ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase_ : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
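# Small standalone check of the dynamic-axes logic above: multiple-choice inputs carry a
# (batch, choice, sequence) layout, every other task a (batch, sequence) layout.
def dynamic_axes_for(task):
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return {name: axis for name in ("input_ids", "attention_mask", "token_type_ids")}
assert dynamic_axes_for("default")["input_ids"] == {0: "batch", 1: "sequence"}
assert dynamic_axes_for("multiple-choice")["token_type_ids"] == {0: "batch", 1: "choice", 2: "sequence"}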
| 345 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
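# Minimal sketch (assumed latent shape) of what sample_posterior=True exercises in the
# test above: the encoder predicts a mean and log-variance and a latent is drawn by the
# reparameterization trick, seeded through the same kind of generator.
import torch
mean, logvar = torch.zeros(1, 4, 8, 8), torch.zeros(1, 4, 8, 8)
generator = torch.manual_seed(0)
std = torch.exp(0.5 * logvar)
latent = mean + std * torch.randn(mean.shape, generator=generator)
assert latent.shape == (1, 4, 8, 8)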
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
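# NOTE: a minimal, self-contained sketch of the device-aware seeding pattern the
# tests above rely on. "mps" does not accept torch.Generator(device=...), so the
# global generator from torch.manual_seed is used there instead; the "cpu" device
# below is an assumption chosen so the sketch runs anywhere.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if device == "mps":
        return torch.manual_seed(seed)  # returns the seeded global generator
    return torch.Generator(device=device).manual_seed(seed)

generator = make_generator("cpu", seed=0)
sample = torch.randn(1, 4, 8, 8, generator=generator)
assert sample.shape == (1, 4, 8, 8)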
| 345 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A__ : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=__snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A__ ( self: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.task_name.lower()
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Any = "train"
A__ : Tuple = "dev"
A__ : List[Any] = "test"
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : GlueDataTrainingArguments
A__ : str
A__ : List[InputFeatures]
def __init__( self: Any ,lowerCamelCase_: GlueDataTrainingArguments ,lowerCamelCase_: PreTrainedTokenizerBase ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Union[str, Split] = Split.train ,lowerCamelCase_: Optional[str] = None ,) -> Dict:
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" ,lowerCamelCase_ ,)
UpperCAmelCase_ : int = args
UpperCAmelCase_ : Union[str, Any] = glue_processors[args.task_name]()
UpperCAmelCase_ : Tuple = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
try:
UpperCAmelCase_ : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
UpperCAmelCase_ : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' ,)
UpperCAmelCase_ : Optional[int] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ : int = label_list[2], label_list[1]
UpperCAmelCase_ : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ : int = cached_features_file + """.lock"""
with FileLock(lowerCamelCase_ ):
if os.path.exists(lowerCamelCase_ ) and not args.overwrite_cache:
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Dict = torch.load(lowerCamelCase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' ,time.time() - start )
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
UpperCAmelCase_ : Optional[int] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCAmelCase_ : Union[str, Any] = self.processor.get_test_examples(args.data_dir )
else:
UpperCAmelCase_ : Union[str, Any] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCAmelCase_ : Tuple = examples[:limit_length]
UpperCAmelCase_ : Any = glue_convert_examples_to_features(
lowerCamelCase_ ,lowerCamelCase_ ,max_length=args.max_seq_length ,label_list=lowerCamelCase_ ,output_mode=self.output_mode ,)
UpperCAmelCase_ : List[str] = time.time()
torch.save(self.features ,lowerCamelCase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self: Optional[int] ) -> List[str]:
return len(self.features )
def __getitem__( self: Any ,lowerCamelCase_: Optional[Any] ) -> InputFeatures:
return self.features[i]
def A__ ( self: Tuple ) -> str:
return self.label_list
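# NOTE: a hedged sketch of the lock-then-cache pattern the dataset class above
# uses, so that only one process builds the features while the others wait on
# the lock and read the cached file. The cache file name is an illustrative
# assumption.
import os
import torch
from filelock import FileLock

def load_or_build(cache_file: str, build_fn):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        features = build_fn()
        torch.save(features, cache_file)
        return features

# Usage sketch: features = load_or_build("cached_train.pt", lambda: [1, 2, 3])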
| 345 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"audio": Audio()} )
A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
A__ : str = "audio"
A__ : str = "transcription"
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
UpperCAmelCase_ : Any = features[self.audio_column]
UpperCAmelCase_ : Union[str, Any] = input_schema
return task_template
@property
def A__ ( self: List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
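# NOTE: the template above is declared @dataclass(frozen=True), so plain
# attribute assignment on a copy raises FrozenInstanceError; dataclasses.replace
# (or object.__setattr__) is the usual way to derive an updated copy. A minimal
# sketch with an illustrative stand-in template:
import copy
from dataclasses import dataclass, replace

@dataclass(frozen=True)
class _Template:
    audio_column: str = "audio"
    transcription_column: str = "transcription"

t = _Template()
t2 = replace(copy.deepcopy(t), audio_column="speech")
assert t2.audio_column == "speech" and t.audio_column == "audio"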
| 345 | 1 |
def bubble_sort(list_data: list, length: int = 0):
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
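# NOTE: quick sanity checks for the recursive bubble sort above; the sample
# values are arbitrary.
assert bubble_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert bubble_sort([]) == []
assert bubble_sort([3]) == [3]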
| 345 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "layoutlmv3"
def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]:
super().__init__(
vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : Optional[int] = shape_size
UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
UpperCAmelCase_ : Optional[int] = rel_pos_bins
UpperCAmelCase_ : Union[str, Any] = max_rel_pos
UpperCAmelCase_ : Dict = has_spatial_attention_bias
UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
UpperCAmelCase_ : Tuple = max_rel_ad_pos
UpperCAmelCase_ : Union[str, Any] = text_embed
UpperCAmelCase_ : Optional[Any] = visual_embed
UpperCAmelCase_ : List[str] = input_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = classifier_dropout
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = version.parse("1.12" )
@property
def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
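# NOTE: a hedged re-implementation, for illustration only, of what
# compute_effective_axis_dimension does in the ONNX config above: a dynamic
# axis (-1) is replaced by a small fixed dimension, minus any special tokens
# the tokenizer will add. Not the transformers.onnx function itself.
def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert effective_axis_dimension(-1, fixed_dimension=2) == 2
assert effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6
assert effective_axis_dimension(16, fixed_dimension=8) == 16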
| 345 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    '''simple docstring'''
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
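# NOTE: quick sanity checks for the divide-and-conquer maximum above; the
# values are arbitrary.
nums = [3, -1, 7, 7, 0]
assert find_max(nums, 0, len(nums) - 1) == 7
assert find_max([2.5], 0, 0) == 2.5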
| 345 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    args = Namespace(**checkpoint["""cfg"""]["""model"""])
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""", """model"""): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
args = parser.parse_args()
model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
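# NOTE: a minimal sketch of the make_linear_from_emb idea used above: an output
# projection whose weight tensor is taken directly from an embedding matrix.
# Dimensions are arbitrary toy values.
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin = nn.Linear(4, 10, bias=False)
lin.weight.data = emb.weight.data  # (10, 4), matching Linear's (out, in) layout
x = torch.randn(2, 4)
assert lin(x).shape == (2, 10)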
| 345 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _snake_case ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
'''simple docstring'''
def __init__( self: Optional[Any] ,lowerCamelCase_: List[Any]=None ,**lowerCamelCase_: Union[str, Any] ) -> Dict:
super().__init__(features=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def A__ ( self: Any ,lowerCamelCase_: int ) -> Union[str, Any]:
import torch
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and column:
if all(
isinstance(lowerCamelCase_ ,torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowerCamelCase_ )
return column
def A__ ( self: int ,lowerCamelCase_: List[Any] ) -> Any:
import torch
if isinstance(lowerCamelCase_ ,(str, bytes, type(lowerCamelCase_ )) ):
return value
elif isinstance(lowerCamelCase_ ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
UpperCAmelCase_ : str = {}
if isinstance(lowerCamelCase_ ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
UpperCAmelCase_ : str = {"""dtype""": torch.int64}
elif isinstance(lowerCamelCase_ ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
UpperCAmelCase_ : Any = {"""dtype""": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase_ ,PIL.Image.Image ):
UpperCAmelCase_ : Dict = np.asarray(lowerCamelCase_ )
return torch.tensor(lowerCamelCase_ ,**{**default_dtype, **self.torch_tensor_kwargs} )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Optional[int]:
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCamelCase_ ,"""__array__""" ) and not isinstance(lowerCamelCase_ ,torch.Tensor ):
UpperCAmelCase_ : Tuple = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase_ ,np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] )
elif isinstance(lowerCamelCase_ ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: dict ) -> Tuple:
return map_nested(self._recursive_tensorize ,lowerCamelCase_ ,map_list=lowerCamelCase_ )
def A__ ( self: Tuple ,lowerCamelCase_: pa.Table ) -> Mapping:
UpperCAmelCase_ : Dict = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.python_features_decoder.decode_row(lowerCamelCase_ )
return self.recursive_tensorize(lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: pa.Table ) -> "torch.Tensor":
UpperCAmelCase_ : str = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ )
UpperCAmelCase_ : Any = self.python_features_decoder.decode_column(lowerCamelCase_ ,pa_table.column_names[0] )
UpperCAmelCase_ : List[str] = self.recursive_tensorize(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self._consolidate(lowerCamelCase_ )
return column
def A__ ( self: Tuple ,lowerCamelCase_: pa.Table ) -> Mapping:
UpperCAmelCase_ : int = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.python_features_decoder.decode_batch(lowerCamelCase_ )
UpperCAmelCase_ : str = self.recursive_tensorize(lowerCamelCase_ )
for column_name in batch:
UpperCAmelCase_ : str = self._consolidate(batch[column_name] )
return batch
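# NOTE: a hedged usage sketch of the formatter above through the public
# datasets API; requires `datasets` and `torch`, and the column values are toy
# data. Equal-length integer rows are consolidated into one stacked tensor.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})
ds = ds.with_format("torch")
batch = ds[:2]
assert batch["x"].shape == (2, 2)  # rows stacked into one int64 tensor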
| 345 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
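# NOTE: the initialization check in the tests above rounds each parameter's
# mean to nine decimals and expects exactly 0.0 or 1.0 once initializer ranges
# are zeroed out. A self-contained sketch of that rounding trick on a plain
# module:
import torch
from torch import nn

layer = nn.Linear(4, 4)
nn.init.zeros_(layer.weight)
nn.init.ones_(layer.bias)
for name, param in layer.named_parameters():
    rounded = ((param.data.mean() * 1e9).round() / 1e9).item()
    assert rounded in [0.0, 1.0], f"{name} seems not properly initialized"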
| 345 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["""TOKENIZERS_PARALLELISM"""] = '''true'''
def lowerCamelCase_ ( _a : Union[str, Any] , _a : List[str]=82 , _a : Optional[int]=16 ):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ : Optional[Any] = RegressionModel()
UpperCAmelCase_ : Union[str, Any] = deepcopy(_a )
UpperCAmelCase_ : List[Any] = RegressionDataset(length=_a )
UpperCAmelCase_ : Dict = DataLoader(_a , batch_size=_a )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_a , _a )
return model, ddp_model, dataloader
def lowerCamelCase_ ( _a : Accelerator , _a : Tuple=False ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
UpperCAmelCase_ : List[Any] = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(examples):
    outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
    return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : Optional[int] = dataset.map(
_a , batched=_a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
UpperCAmelCase_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(examples):
    if use_longest:
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 )
def lowerCamelCase_ ( _a : Tuple , _a : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = Accelerator(dispatch_batches=_a , split_batches=_a )
UpperCAmelCase_ : Optional[int] = get_dataloader(_a , not dispatch_batches )
UpperCAmelCase_ : Tuple = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_a )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(_a , _a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase_ ( _a : Optional[Any] , _a : List[str] , _a : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : Any = model(_a )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : str = [], []
for logit, targ in logits_and_targets:
logits.append(_a )
targs.append(_a )
UpperCAmelCase_ , UpperCAmelCase_ : str = torch.cat(_a ), torch.cat(_a )
return logits, targs
def lowerCamelCase_ ( _a : Accelerator , _a : Dict=82 , _a : str=False , _a : Optional[Any]=False , _a : Union[str, Any]=16 ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_basic_setup(_a , _a , _a )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = generate_predictions(_a , _a , _a )
assert (
len(_a ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}'''
def lowerCamelCase_ ( _a : bool = False , _a : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(_a , _a )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup["""no"""]
model.to(_a )
model.eval()
for batch in dataloader:
batch.to(_a )
with torch.inference_mode():
UpperCAmelCase_ : Optional[Any] = model(**_a )
UpperCAmelCase_ : List[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_a , references=batch["""labels"""] )
UpperCAmelCase_ : int = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : str = model(**_a )
UpperCAmelCase_ : List[str] = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : int = batch["""labels"""]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_a , references=_a )
UpperCAmelCase_ : int = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(_a , _a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : int = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(_a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
UpperCAmelCase_ : Any = Accelerator()
test_torch_metrics(_a , 512 )
accelerator.state._reset_state()
def _mp_fn(index):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
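# NOTE: a minimal sketch of the gather_for_metrics pattern the script above
# exercises; on a single process gathering is a no-op, so this also runs
# standalone. Requires `accelerate`; the predictions are toy values.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
preds = torch.tensor([0, 1, 1])
refs = torch.tensor([0, 1, 0])
preds, refs = accelerator.gather_for_metrics((preds, refs))
accuracy = (preds == refs).float().mean().item()
print(f"accuracy={accuracy:.2f}")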
| 345 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = SwinvaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
            reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
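# Shape sanity sketch for the Swinv2 tests above (hypothetical numbers, not taken from
# the original test file): with image_size=256, patch_size=4 and four stages
# (len(depths) == 4), the initial grid holds (256 // 4) ** 2 = 4096 patch tokens; each
# of the three patch-merging steps divides the token count by 4, giving an expected
# sequence length of 4096 // 4**3 = 64, while the width doubles per merge to
# embed_dim * 2**3, matching the expected_seq_len and expected_dim formulas in the
# model check above.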
| 345 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class _snake_case ( __snake_case ):
'''simple docstring'''
def __lt__( self: List[str] ,lowerCamelCase_: Optional[int] ) -> int:
return self[-1] < other[-1]
def __eq__( self: Union[str, Any] ,lowerCamelCase_: List[str] ) -> Tuple:
return self[-1] == other[-1]
def lowerCamelCase_ ( _a : list ):
'''simple docstring'''
UpperCAmelCase_ : list[Stack] = []
# sort into stacks
for element in collection:
UpperCAmelCase_ : Union[str, Any] = Stack([element] )
        UpperCAmelCase_ : int = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge the stacks efficiently
    UpperCAmelCase_ : List[str] = merge(*(reversed(stack ) for stack in stacks) )
return collection
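# Worked sketch (illustrative, not part of the original module): patience_sort([5, 1, 4, 2, 3])
# builds the non-increasing stacks [5, 1], [4, 2] and [3], placing each element on the
# leftmost stack whose last value is >= the element (found via bisect_left on the stacks'
# last elements), then lazily merges the reversed stacks [1, 5], [2, 4], [3] into
# [1, 2, 3, 4, 5].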
if __name__ == "__main__":
UpperCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase_ = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 345 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_ : Dict = bs[:]
UpperCAmelCase_ : Any = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    UpperCAmelCase_ : Any = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
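# Illustrative sketch (relying only on the function above): printable bytes map to
# themselves, while the remaining bytes are shifted past 255 in order of appearance,
# e.g. the space byte 0x20 maps to chr(256 + 32) == "Ġ", which is why GPT-2-style BPE
# vocabularies mark word-initial tokens with "Ġ".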
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
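# e.g. (sketch): get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}, the set of
# adjacent symbol bigrams that the BPE loop in the tokenizer below repeatedly ranks and
# merges.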
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : lowerCamelCase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
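# Minimal usage sketch (a sketch only, assuming the class above is used as the LED
# tokenizer and that vocab.json / merges.txt exist locally; the file names are
# illustrative):
# tokenizer = LEDTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
# batch = tokenizer("long document ...", padding="max_length", max_length=16)
# If a "global_attention_mask" is supplied alongside the inputs, the `_pad` override
# above extends it with -1 so its length matches the padded input_ids.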
| 345 | 1 |
import numpy as np
def lowerCamelCase_ ( _a : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
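# Numeric sketch (values rounded, not part of the original file): applying the function
# above to np.array([-1.0, 0.0, 2.0]) yields approximately [0.26894142, 0.5, 0.88079708].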
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
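# Parameter note for the checks above: sld_guidance_scale=0 turns safe latent diffusion
# off (the "without safety guidance" branches), while the strong configuration combines
# sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025,
# sld_momentum_scale=0.5 and sld_mom_beta=0.7; the expected image slices differ
# accordingly.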
| 345 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase_ = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
UpperCamelCase_ = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
UpperCamelCase_ = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
def A__ ( self: int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/krishnap25/mauve""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/krishnap25/mauve"""] ,reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: int=None ,lowerCamelCase_: str=None ,lowerCamelCase_: List[str]=None ,lowerCamelCase_: Any="auto" ,lowerCamelCase_: str=-1 ,lowerCamelCase_: Optional[int]=0.9 ,lowerCamelCase_: Any=5 ,lowerCamelCase_: Union[str, Any]=500 ,lowerCamelCase_: Union[str, Any]="gpt2-large" ,lowerCamelCase_: int=-1 ,lowerCamelCase_: Optional[Any]=1024 ,lowerCamelCase_: Any=25 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=25 ,) -> Tuple:
UpperCAmelCase_ : Optional[Any] = compute_mauve(
p_text=lowerCamelCase_ ,q_text=lowerCamelCase_ ,p_features=lowerCamelCase_ ,q_features=lowerCamelCase_ ,p_tokens=lowerCamelCase_ ,q_tokens=lowerCamelCase_ ,num_buckets=lowerCamelCase_ ,pca_max_data=lowerCamelCase_ ,kmeans_explained_var=lowerCamelCase_ ,kmeans_num_redo=lowerCamelCase_ ,kmeans_max_iter=lowerCamelCase_ ,featurize_model_name=lowerCamelCase_ ,device_id=lowerCamelCase_ ,max_text_length=lowerCamelCase_ ,divergence_curve_discretization_size=lowerCamelCase_ ,mauve_scaling_factor=lowerCamelCase_ ,verbose=lowerCamelCase_ ,seed=lowerCamelCase_ ,)
return out
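# Reading of the call above (a note, not original code): the `predictions` and
# `references` arguments of `_compute` are forwarded to `compute_mauve` as `p_text` and
# `q_text` respectively; all remaining keyword arguments are passed through unchanged.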
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[str] = MobileBertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
return torch.tensor(
        _a , dtype=torch.long , device=torch_device , )
UpperCamelCase_ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0]
UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = torch.tensor(
[
[
[-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
[-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
[2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
]
] ,device=lowerCamelCase_ ,)
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 absolute difference, so it is not a good idea to measure closeness using addition.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that
        # the result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        # (a standalone sketch of this check appears after this class)
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
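# A minimal standalone sketch of the ratio-based tolerance check above, assuming
# tensors whose entries span several orders of magnitude; the helper name
# `ratio_close` is illustrative, not part of the test suite.
import torch

def ratio_close(expected, actual, tolerance=1e-3):
    """True iff every entry of expected / actual lies in [1 - tolerance, 1 + tolerance]."""
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))

# A ~0.001% relative difference passes at tolerance 1e-3; a 1% difference fails.
assert ratio_close(torch.tensor([1e8, 2.0]), torch.tensor([1.00001e8, 2.00001]))
assert not ratio_close(torch.tensor([1e8]), torch.tensor([1.01e8]))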
| 345 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[Any] = "umt5"
A__ : Optional[Any] = ["past_key_values"]
def __init__( self: Dict ,lowerCamelCase_: Dict=250112 ,lowerCamelCase_: Optional[Any]=512 ,lowerCamelCase_: Union[str, Any]=64 ,lowerCamelCase_: Any=1024 ,lowerCamelCase_: Dict=8 ,lowerCamelCase_: int=None ,lowerCamelCase_: Optional[Any]=6 ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: int=128 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: Optional[int]=1e-6 ,lowerCamelCase_: Tuple=1.0 ,lowerCamelCase_: Tuple="gated-gelu" ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Optional[Any]="T5Tokenizer" ,lowerCamelCase_: int=True ,lowerCamelCase_: List[str]=0 ,lowerCamelCase_: Optional[int]=1 ,lowerCamelCase_: List[Any]=0 ,**lowerCamelCase_: List[Any] ,) -> Dict:
super().__init__(
is_encoder_decoder=lowerCamelCase_ ,tokenizer_class=lowerCamelCase_ ,tie_word_embeddings=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,decoder_start_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[int] = d_model
UpperCAmelCase_ : Tuple = d_kv
UpperCAmelCase_ : Optional[int] = d_ff
UpperCAmelCase_ : str = num_layers
UpperCAmelCase_ : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Union[str, Any] = num_heads
UpperCAmelCase_ : int = relative_attention_num_buckets
UpperCAmelCase_ : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase_ : Optional[int] = dropout_rate
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : str = initializer_factor
UpperCAmelCase_ : Optional[Any] = feed_forward_proj
UpperCAmelCase_ : Dict = use_cache
UpperCAmelCase_ : Union[str, Any] = self.feed_forward_proj.split("""-""" )
UpperCAmelCase_ : Any = act_info[-1]
UpperCAmelCase_ : str = act_info[0] == """gated"""
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : List[Any] = """gelu_new"""
@property
def A__ ( self: List[Any] ) -> Dict:
return self.d_model
@property
def A__ ( self: Union[str, Any] ) -> Dict:
return self.num_heads
@property
def A__ ( self: List[str] ) -> Tuple:
return self.num_layers
class _snake_case ( __snake_case ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A__ ( self: str ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : int = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
UpperCAmelCase_ : Tuple = """past_encoder_sequence + sequence"""
UpperCAmelCase_ : Tuple = {0: """batch"""}
UpperCAmelCase_ : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A__ ( self: int ) -> int:
return 13
@property
def A__ ( self: Dict ) -> float:
return 5e-4
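# A minimal sketch of the `feed_forward_proj` parsing done in __init__ above,
# covering specs of the form "{ACT_FN}" and "gated-{ACT_FN}"; the helper name
# `parse_feed_forward_proj` is illustrative.
def parse_feed_forward_proj(spec):
    parts = spec.split("-")
    act_fn = parts[-1]
    is_gated = parts[0] == "gated"
    if (len(parts) > 1 and not is_gated) or len(parts) > 2:
        raise ValueError(f"{spec!r} must look like 'gated-ACT_FN' or 'ACT_FN'")
    if spec == "gated-gelu":  # historical alias kept by the config above
        act_fn = "gelu_new"
    return act_fn, is_gated

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)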
| 345 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Optional[int] = 35
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Dict = 8
UpperCAmelCase_ : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
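# A minimal sketch of the .npz round-trip exercised above: a voice preset is a
# dict of numpy arrays that np.savez writes and np.load reads back by key.
# Shapes and the temporary path are illustrative.
import os
import tempfile
import numpy as np

preset = {"semantic_prompt": np.ones(35), "coarse_prompt": np.ones((2, 35))}
path = os.path.join(tempfile.mkdtemp(), "preset.npz")
np.savez(path, **preset)
loaded = np.load(path)
assert all(np.array_equal(preset[key], loaded[key]) for key in preset)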
| 345 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _snake_case :
'''simple docstring'''
def __init__( self: str ,lowerCamelCase_: List[str] ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Any=True ,lowerCamelCase_: Any=False ,lowerCamelCase_: Union[str, Any]=10 ,lowerCamelCase_: List[str]=3 ,lowerCamelCase_: str=32 * 4 ,lowerCamelCase_: Optional[int]=32 * 6 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=32 ,) -> int:
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Optional[Any] = use_auxiliary_loss
UpperCAmelCase_ : str = num_queries
UpperCAmelCase_ : Optional[int] = num_channels
UpperCAmelCase_ : Tuple = min_size
UpperCAmelCase_ : int = max_size
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[Any] = mask_feature_size
def A__ ( self: Any ) -> Any:
UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase_ )
UpperCAmelCase_ : int = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=lowerCamelCase_ ) > 0.5
).float()
UpperCAmelCase_ : Any = (torch.rand((self.batch_size, self.num_labels) ,device=lowerCamelCase_ ) > 0.5).long()
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)
def A__ ( self: Dict ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = self.prepare_config_and_inputs()
UpperCAmelCase_ : Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = output.encoder_hidden_states
UpperCAmelCase_ : Any = output.pixel_decoder_hidden_states
UpperCAmelCase_ : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase_ ) ,config.decoder_config.decoder_layers )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str=False ) -> Dict:
with torch.no_grad():
UpperCAmelCase_ : str = MaskFormerModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = model(pixel_values=lowerCamelCase_ ,pixel_mask=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,output_hidden_states=lowerCamelCase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = MaskFormerForInstanceSegmentation(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
def comm_check_on_output(lowerCamelCase_: Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(pixel_values=lowerCamelCase_ ,pixel_mask=lowerCamelCase_ )
UpperCAmelCase_ : Any = model(lowerCamelCase_ )
comm_check_on_output(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(
pixel_values=lowerCamelCase_ ,pixel_mask=lowerCamelCase_ ,mask_labels=lowerCamelCase_ ,class_labels=lowerCamelCase_ )
comm_check_on_output(lowerCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A__ : Dict = False
A__ : List[Any] = False
A__ : int = False
A__ : int = False
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[Any] = MaskFormerModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase_ ,**lowerCamelCase_ ,output_hidden_states=lowerCamelCase_ )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase_ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def A__ ( self: int ) -> str:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def A__ ( self: List[str] ) -> Dict:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def A__ ( self: List[Any] ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def A__ ( self: Dict ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self: str ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self: List[Any] ) -> List[str]:
pass
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
@slow
def A__ ( self: int ) -> str:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase_ : str = MaskFormerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Any = (self.model_tester.min_size,) * 2
UpperCAmelCase_ : List[str] = {
"""pixel_values""": torch.randn((2, 3, *size) ,device=lowerCamelCase_ ),
"""mask_labels""": torch.randn((2, 10, *size) ,device=lowerCamelCase_ ),
"""class_labels""": torch.zeros(2 ,10 ,device=lowerCamelCase_ ).long(),
}
UpperCAmelCase_ : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
self.assertTrue(outputs.loss is not None )
def A__ ( self: Tuple ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase_ ,**lowerCamelCase_ ,output_hidden_states=lowerCamelCase_ )
def A__ ( self: Tuple ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(lowerCamelCase_ ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ,output_attentions=lowerCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def A__ ( self: Optional[int] ) -> List[Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase_ : str = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,mask_labels=lowerCamelCase_ ,class_labels=lowerCamelCase_ ).loss
loss.backward()
def A__ ( self: Optional[Any] ) -> int:
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase_ : Optional[int] = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,mask_labels=lowerCamelCase_ ,class_labels=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
UpperCAmelCase_ : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase_ = 1E-4
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[Any] ) -> int:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase_ ,(1, 3, 800, 1088) )
with torch.no_grad():
UpperCAmelCase_ : Any = model(**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(lowerCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
UpperCAmelCase_ : Tuple = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(lowerCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(lowerCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
def A__ ( self: int ) -> Dict:
UpperCAmelCase_ : str = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(lowerCamelCase_ )
.eval()
)
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : Any = image_processor(lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase_ ,(1, 3, 800, 1088) )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# masks_queries_logits
UpperCAmelCase_ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
UpperCAmelCase_ : List[str] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
# class_queries_logits
UpperCAmelCase_ : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ : List[str] = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
def A__ ( self: Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(lowerCamelCase_ )
.eval()
)
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : str = image_processor(lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase_ ,(1, 3, 800, 1088) )
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(**lowerCamelCase_ )
# masks_queries_logits
UpperCAmelCase_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
UpperCAmelCase_ : List[Any] = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
# class_queries_logits
UpperCAmelCase_ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ : Optional[int] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(lowerCamelCase_ )
.eval()
)
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : int = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
UpperCAmelCase_ : Any = inputs["""pixel_values"""].to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = [el.to(lowerCamelCase_ ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_ : str = [el.to(lowerCamelCase_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_ : str = model(**lowerCamelCase_ )
self.assertTrue(outputs.loss is not None )
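# A minimal sketch of the gradient-retention pattern above: intermediate
# (non-leaf) tensors do not keep a .grad after backward() unless
# .retain_grad() was called on them first. Names are illustrative.
import torch

x = torch.randn(3, requires_grad=True)
hidden = 2 * x            # non-leaf intermediate tensor
hidden.retain_grad()      # keep its gradient around after backward()
hidden.sum().backward()
assert hidden.grad is not None                       # retained
assert torch.allclose(hidden.grad, torch.ones(3))    # d(sum)/d(hidden) == 1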
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
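# A minimal sketch of the timeout behaviour tested above: the iterator
# streamer hands tokens over through a queue.Queue, and a blocking get with a
# timeout raises queue.Empty when nothing arrives in time.
from queue import Empty, Queue

token_queue = Queue()
try:
    token_queue.get(timeout=0.001)  # no producer ever puts anything
except Empty:
    print("timed out waiting for the next token")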
| 345 | 1 |
from sklearn.metrics import fa_score
import datasets
UpperCamelCase_ = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
UpperCamelCase_ = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
UpperCamelCase_ = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
def A__ ( self: Tuple ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Dict ,lowerCamelCase_: int=None ,lowerCamelCase_: str=1 ,lowerCamelCase_: List[Any]="binary" ,lowerCamelCase_: List[str]=None ) -> Union[str, Any]:
UpperCAmelCase_ : str = fa_score(
lowerCamelCase_ ,lowerCamelCase_ ,labels=lowerCamelCase_ ,pos_label=lowerCamelCase_ ,average=lowerCamelCase_ ,sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_uncond_unet
UpperCAmelCase_ : List[Any] = DDIMScheduler()
UpperCAmelCase_ : List[Any] = self.dummy_vq_model
UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
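# A minimal sketch of the slice comparison above: take a small corner slice,
# flatten it, and compare to the reference values with a max absolute
# difference under the device-dependent tolerance. Numbers are illustrative.
import numpy as np

image_slice = np.array([0.4400, 0.4500, 0.4680])
expected_slice = np.array([0.4399, 0.44975, 0.46825])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2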
| 345 | 1 |
def lowerCamelCase_ ( _a : int , _a : int ):
'''simple docstring'''
    UpperCAmelCase_ : int = 1  # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
UpperCAmelCase_ : Union[str, Any] = n - k
# Calculate C(n,k)
for i in range(_a ):
result *= n - i
result //= i + 1
return result
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , _a ) // (node_count + 1)
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
UpperCAmelCase_ : Any = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
return catalan_number(_a ) * factorial(_a )
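# A self-contained sanity check of the Catalan formula used above, written
# independently of the renamed helpers: C(n) = binom(2n, n) // (n + 1), and
# the number of binary trees on n labelled nodes is C(n) * n!.
from math import comb, factorial as _factorial

def _catalan(n):
    return comb(2 * n, n) // (n + 1)

assert [_catalan(n) for n in range(5)] == [1, 1, 2, 5, 14]
assert _catalan(3) * _factorial(3) == 30  # 5 shapes * 3! labelings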
if __name__ == "__main__":
UpperCamelCase_ = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
F"binary trees and {catalan_number(node_count)} binary search trees."
)
| 345 |
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = [0] * len(_a )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Dict = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_a ) ):
if indegree[i] == 0:
queue.append(_a )
while queue:
UpperCAmelCase_ : List[str] = queue.pop(0 )
cnt += 1
topo.append(_a )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_a )
if cnt != len(_a ):
print("""Cycle exists""" )
else:
print(_a )
# Adjacency List of Graph
UpperCamelCase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 345 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
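# A minimal sketch of the guarded-import pattern above: probe for the optional
# dependency, and expose either the real implementation or a stub that fails
# loudly on use. `some_optional_backend` is a made-up name.
try:
    import some_optional_backend  # noqa: F401
    def run():
        return "real implementation"
except ImportError:
    def run():
        raise ImportError("some_optional_backend is required to use run()")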
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "swinv2"
A__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: List[str] ,lowerCamelCase_: List[str]=224 ,lowerCamelCase_: List[str]=4 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: Optional[Any]=96 ,lowerCamelCase_: Any=[2, 2, 6, 2] ,lowerCamelCase_: Dict=[3, 6, 12, 24] ,lowerCamelCase_: str=7 ,lowerCamelCase_: Optional[Any]=4.0 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: str=False ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-5 ,lowerCamelCase_: str=32 ,**lowerCamelCase_: List[str] ,) -> Tuple:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Tuple = patch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[Any] = embed_dim
UpperCAmelCase_ : Dict = depths
UpperCAmelCase_ : Dict = len(lowerCamelCase_ )
UpperCAmelCase_ : str = num_heads
UpperCAmelCase_ : Tuple = window_size
UpperCAmelCase_ : int = mlp_ratio
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : int = drop_path_rate
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : List[str] = use_absolute_embeddings
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCAmelCase_ : Any = (0, 0, 0, 0)
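# A worked instance of the channel-doubling comment above: with embed_dim = 96
# and four stages (len(depths) == 4), the final stage is
# 96 * 2 ** (4 - 1) == 768 channels wide, which is the hidden_size value set.
assert int(96 * 2 ** (4 - 1)) == 768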
| 345 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
| 345 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check ensures we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024; tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the tokenizer is from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
| 345 | 1 |
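# A minimal sketch of the longest-match splitting the Trie tests above exercise.
# `MiniTrie` is an illustrative name, not the implementation in
# transformers.tokenization_utils; the real Trie also resolves overlapping
# matches, which this sketch ignores.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # leaf marker, matching the dicts asserted in the tests

    def split(self, text):
        parts, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            # Greedily walk the trie for the longest token starting at i.
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j
            if end is None:
                i += 1
            else:
                if start < i:
                    parts.append(text[start:i])
                parts.append(text[i:end])
                start = i = end
        if start < len(text):
            parts.append(text[start:])
        return parts


mini = MiniTrie()
mini.add("[CLS]")
mini.add("extra_id_100")
assert mini.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]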
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Any = "deit"
def __init__( self: Optional[Any] ,lowerCamelCase_: Optional[int]=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: str=12 ,lowerCamelCase_: Optional[int]=3072 ,lowerCamelCase_: Any="gelu" ,lowerCamelCase_: Union[str, Any]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Optional[Any]=1e-12 ,lowerCamelCase_: Any=224 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: str=3 ,lowerCamelCase_: Any=True ,lowerCamelCase_: str=16 ,**lowerCamelCase_: Union[str, Any] ,) -> Optional[int]:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : List[Any] = qkv_bias
UpperCAmelCase_ : int = encoder_stride
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = version.parse("1.11" )
@property
def A__ ( self: List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self: List[Any] ) -> float:
return 1e-4
| 345 |
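# The patch-embedding arithmetic implied by the defaults above: a 224x224 image
# cut into 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens (plus the
# class and distillation tokens DeiT prepends). A short sketch, assuming a
# transformers install that exposes the class above under its public name,
# DeiTConfig:
from transformers import DeiTConfig

config = DeiTConfig()  # defaults: image_size=224, patch_size=16
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 196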
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
| 345 | 1 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    '''Solve the linear system Ax = b iteratively using the Jacobi method.'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)
    if cols2 != 1:
        msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterate over the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''Raise if any diagonal entry fails to dominate the rest of its row.'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
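# A quick usage sketch for jacobi_iteration_method above; the 3x3 system is
# chosen here purely for illustration and is strictly diagonally dominant, so
# the dominance check passes.
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]

print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))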
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    '''Shuffle ``data`` in place via len(data) random transpositions.'''
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 345 | 1 |
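# Note that the shuffle above performs len(data) random transpositions, which
# mixes the list but is not the textbook Fisher-Yates algorithm and does not
# sample permutations exactly uniformly. The canonical form, for comparison
# (a sketch; random.shuffle implements the same idea):
import random

def fisher_yates_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # uniform over positions 0..i inclusive
        data[i], data[j] = data[j], data[i]
    return data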
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 345 |
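# The import structure above delays importing torch/TF code until a name is
# first touched. A minimal, framework-free sketch of the same lazy-module idea
# (illustrative only; the real transformers._LazyModule also handles
# TYPE_CHECKING re-exports and friendlier error messages):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute back to the submodule defining it.
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(
            "." + self._attr_to_submodule[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once
        return value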
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
| 345 | 1 |
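# The residual bookkeeping the up blocks above rely on, reduced to plain
# jax.numpy: pop the most recent down-block activation and concatenate it on
# the channel (last) axis before running the resnet. Shapes here are
# illustrative.
import jax.numpy as jnp

res_hidden_states_tuple = (jnp.ones((1, 8, 8, 64)), jnp.ones((1, 8, 8, 64)))
hidden_states = jnp.zeros((1, 8, 8, 64))

res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 8, 8, 128) -- channels double before the resnet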
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = "gpt_bigcode"
A__ : str = ["past_key_values"]
A__ : Any = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self: Optional[int] ,lowerCamelCase_: List[Any]=50257 ,lowerCamelCase_: Optional[int]=1024 ,lowerCamelCase_: Any=768 ,lowerCamelCase_: Tuple=12 ,lowerCamelCase_: Optional[int]=12 ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: int="gelu_pytorch_tanh" ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: Any=0.0_2 ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: List[Any]=50256 ,lowerCamelCase_: Optional[int]=50256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: List[str]=True ,**lowerCamelCase_: Optional[Any] ,) -> int:
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Tuple = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : int = n_layer
UpperCAmelCase_ : str = n_head
UpperCAmelCase_ : str = n_inner
UpperCAmelCase_ : Union[str, Any] = activation_function
UpperCAmelCase_ : Optional[Any] = resid_pdrop
UpperCAmelCase_ : Any = embd_pdrop
UpperCAmelCase_ : Dict = attn_pdrop
UpperCAmelCase_ : Union[str, Any] = layer_norm_epsilon
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : int = scale_attn_weights
UpperCAmelCase_ : int = use_cache
UpperCAmelCase_ : Optional[Any] = attention_softmax_in_fpaa
UpperCAmelCase_ : Dict = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : List[Any] = multi_query
UpperCAmelCase_ : Optional[int] = bos_token_id
UpperCAmelCase_ : Dict = eos_token_id
super().__init__(bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
| 345 |
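# The attribute_map above aliases the canonical transformers names onto the
# GPT-style ones, so both spellings read the same value. A short sketch,
# assuming a transformers version that exposes the class above as
# GPTBigCodeConfig:
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_head=12, n_layer=12)
assert config.hidden_size == config.n_embd == 768
assert config.num_attention_heads == config.n_head == 12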
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and save it as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expand the data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
# expanding three dimension data to one dimension list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
# expanding matrix to one dimension list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
# model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Learning------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed error over every single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model prediction
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
# return the image data after the convolution process so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 345 | 1 |
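# The sliding-window bookkeeping that convolute()/pooling() above implement
# with explicit index loops, reduced to plain numpy: average-pool a 4x4
# feature map with a 2x2 window and stride 2.
import numpy as np

feature_map = np.arange(16, dtype=float).reshape(4, 4)
size_pooling = 2
pooled = np.array(
    [
        [
            feature_map[i : i + size_pooling, j : j + size_pooling].mean()
            for j in range(0, feature_map.shape[1], size_pooling)
        ]
        for i in range(0, feature_map.shape[0], size_pooling)
    ]
)
print(pooled)  # [[ 2.5  4.5] [10.5 12.5]]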
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["sentencepiece"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> List[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["sentencepiece"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: Any ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: List[str] ,*lowerCamelCase_: int ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Dict ) -> Any:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["sentencepiece"]
def __init__( self: Any ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: str ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: List[str] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: str ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Union[str, Any] ) -> Any:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: int ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["sentencepiece"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Union[str, Any] ) -> List[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: List[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["sentencepiece"]
def __init__( self: List[Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["sentencepiece"]
def __init__( self: Optional[int] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Tuple:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Union[str, Any] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: str ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Dict ) -> Tuple:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["sentencepiece"]
def __init__( self: List[str] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Any ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Optional[Any]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: int ,**lowerCamelCase_: Union[str, Any] ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["sentencepiece"]
def __init__( self: List[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Tuple ) -> int:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Union[str, Any] ) -> Dict:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["sentencepiece"]
def __init__( self: Tuple ,*lowerCamelCase_: int ,**lowerCamelCase_: List[Any] ) -> Optional[int]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: int ) -> str:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["sentencepiece"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""sentencepiece"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["sentencepiece"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""sentencepiece"""] )
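# Hedged sketch of the dummy-object pattern the classes above implement: a
# metaclass intercepts any public attribute access (e.g. from_pretrained) so
# that touching a class whose backend is missing raises a clear ImportError.
# The names below (DummyObject, requires_backends, _backends) are assumptions
# for what the auto-renamed `__snake_case`/`A__` symbols stand for, and they
# shadow the real helpers imported at the top of this file; in transformers
# these dummies are only installed when the backend is actually absent.
import importlib.util


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {missing}")


class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):  # let private/dunder lookups through
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class SomeSentencePieceTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])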
| 345 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
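# Hedged illustration of how the merges written in setUp drive tokenization.
# This toy applies merges in file order rather than by rank as the real CTRL
# BPE does; for this tiny fixture the two strategies happen to coincide.
MERGES = [("a", "p"), ("ap", "t</w>"), ("r", "e"), ("a", "d"), ("ad", "apt</w>")]


def toy_bpe(word):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the word boundary
    for left, right in MERGES:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == left and symbols[i + 1] == right:
                symbols[i : i + 2] = [left + right]  # merge the adjacent pair
            else:
                i += 1
    return symbols


assert toy_bpe("adapt") == ["adapt</w>"]  # in-vocab word -> the single token "adapt"
assert toy_bpe("react") == ["re", "a", "c", "t</w>"]  # -> "re@@ a@@ c@@ t"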
| 345 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 345 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
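# Hedged usage sketch (ErnieMModel is assumed to be importable from
# transformers alongside this config; the sizes are illustrative only):
# from transformers import ErnieMConfig, ErnieMModel
# config = ErnieMConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
# model = ErnieMModel(config)  # randomly initialised weights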
| 345 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] ,)
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse}
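if __name__ == "__main__":
    # Sanity check against plain NumPy, using the same values as the docstring
    # example above (a minimal sketch, not part of the metric itself).
    import numpy as np

    preds = np.array([2.5, 0.0, 2.0, 8.0])
    refs = np.array([3.0, -0.5, 2.0, 7.0])
    assert np.isclose(np.mean((preds - refs) ** 2), 0.375)  # matches {'mse': 0.375}
    assert np.isclose(np.sqrt(0.375), 0.6123724356957945)  # the squared=False case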
| 345 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
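# For example, split_text("a b c d e", n=2) returns ["a b", "c d", "e"]:
# the text is split on `character` and re-joined in chunks of n words.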
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
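    # Hedged sketch of querying the artifacts built above with a DPR question
    # encoder (model name and k are illustrative; kept commented out like the
    # reload snippets above):
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # question = rag_example_args.question or "What does Moses' rod turn into ?"
    # question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy()
    # scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_emb, k=5)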
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 1 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire()``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value (in seconds)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-specific lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-specific lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the lock is currently held."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
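# Minimal usage sketch of the FileLock alias above (the standard py-filelock
# API this module mirrors): acquire() blocks for up to `timeout` seconds and
# raises Timeout on failure; leaving the with-block releases the lock even if
# the body raises.
if __name__ == "__main__":
    lock = FileLock("high_ground.txt.lock")
    with lock.acquire(timeout=10):
        with open("high_ground.txt", "a") as f:
            f.write("You were the chosen one.")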
| 345 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
            expected_output_slice = torch.tensor(
                [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
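# Hedged round-trip sketch with the dummy VAE checkpoint used in the first
# test class above (kept commented out so it does not run at import time;
# shapes are illustrative assumptions):
# import torch
# from diffusers import AutoencoderKL
# vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
# img = torch.randn(1, vae.config.in_channels, 32, 32)
# with torch.no_grad():
#     z = vae.encode(img).latent_dist.sample(generator=torch.manual_seed(0))
#     rec = vae.decode(z).sample
# assert rec.shape == img.shape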
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
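# Hedged usage sketch (TrOCRForCausalLM is assumed importable from
# transformers; in practice this decoder config is paired with a ViT-style
# encoder inside VisionEncoderDecoderModel):
# from transformers import TrOCRConfig, TrOCRForCausalLM
# config = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4)
# decoder = TrOCRForCausalLM(config)  # randomly initialised decoder-only model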
| 345 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
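# Hedged usage sketch: rename a dataset's columns to match the template, then
# let align_with_features swap in the dataset's own Audio feature (the dataset
# and column names below are illustrative assumptions):
# from datasets import load_dataset
# ds = load_dataset("common_voice", "tr", split="train")
# ds = ds.rename_columns({"path": "audio", "sentence": "transcription"})
# task = AutomaticSpeechRecognition().align_with_features(ds.features)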
| 345 | 1 |
class CircularQueue:
    """Fixed-size circular queue backed by a plain list (class and method
    names restored from the auto-renamed original)."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
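if __name__ == "__main__":
    # Quick demonstration of the wrap-around behaviour (using the names as
    # restored above).
    q = CircularQueue(3)
    q.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
    assert q.dequeue() == 1  # front advances, its slot is freed
    q.enqueue(4)  # rear has wrapped to index 0 and reuses the freed slot
    assert [q.dequeue() for _ in range(len(q))] == [2, 3, 4]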
| 345 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
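# A minimal usage sketch (checkpoint name and argument values are illustrative assumptions;
# `onnx_config` stands for an instance of the export config above):
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   dummy = onnx_config.generate_dummy_inputs(processor, 2, 8)  # batch_size=2, seq_length=8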
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
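# Lazy import structure: the heavy backend-specific submodules below are only imported on first
# attribute access, keeping `import transformers` fast and the torch/TF/Flax backends optional.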
UpperCamelCase_ = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''ViTFeatureExtractor''']
UpperCamelCase_ = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 345 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_a , _a )
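# Build an output-projection linear layer whose weights are tied to the token embedding matrix.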
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape
UpperCAmelCase_ : Tuple = nn.Linear(_a , _a , bias=_a )
UpperCAmelCase_ : List[Any] = emb.weight.data
return lin_layer
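# Load a fairseq XGLM checkpoint from disk and convert it into a Hugging Face XGLMForCausalLM.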
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] )
UpperCAmelCase_ : Optional[int] = checkpoint["""model"""]
remove_ignore_keys_(_a )
UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]
UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
UpperCAmelCase_ : int = XGLMConfig(
vocab_size=_a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
UpperCAmelCase_ : List[str] = XGLMForCausalLM(_a )
UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a )
print(_a )
UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
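# Example invocation (script name and paths are placeholders):
#   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-hf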
| 345 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
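# Writes a README.md model card for each ported allenai WMT16 checkpoint, filling a template
# with the language pair, BLEU scores, and usage instructions.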
def lowerCamelCase_ ( _a : Union[str, Any] , _a : List[str] , _a : Optional[int] , _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase_ : List[Any] = {
"""wmt16-en-de-dist-12-1""": [2_8.3, 2_7.5_2],
"""wmt16-en-de-dist-6-1""": [2_7.4, 2_7.1_1],
"""wmt16-en-de-12-1""": [2_6.9, 2_5.7_5],
}
UpperCAmelCase_ : List[str] = F'''{src_lang}-{tgt_lang}'''
UpperCAmelCase_ : Dict = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=_a , exist_ok=_a )
UpperCAmelCase_ : Any = os.path.join(_a , """README.md""" )
print(F'''Generating {path}''' )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.write(_a )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 345 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
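# Helper that builds a tiny FocalNet configuration plus dummy pixel inputs shared by the tests below.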
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
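# Runs the FocalNet model classes through the common modeling and pipeline test-suites.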
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
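# A small convolutional neural network written from scratch with NumPy: one convolution layer,
# one pooling layer, and two fully-connected back-propagation layers trained by gradient descent.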
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
        # modify model parameters
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
        # calculate the feature map of every single kernel and save it as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
        # expand the data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # expand three-dimensional data to a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
        # expand a matrix to a one-dimensional list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layers
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the error of each single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
        # model prediction
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution and pooling process so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
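# A minimal usage sketch (argument values and method names here are illustrative assumptions):
#   cnn = CNN([3, 4, 1], 3, 144, 20, 10)  # 3x3 kernels (4 of them, step 1), 3x3 pooling; 20x20 inputs give 4*6*6 = 144 BP inputs
#   cnn.train(len(datas_train), datas_train, datas_teach, n_repeat=100, error_accuracy=0.1, draw_e=True)
#   predictions = cnn.predict(datas_test)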
| 345 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
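# Helper that builds a tiny Swinv2 configuration plus dummy pixel inputs for the tests below.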
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = SwinvaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
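# A standalone sketch (not part of the original test module) of mapping the verified logits to a
# label; upstream, this dataset's renamed classes correspond to `Swinv2ForImageClassification`
# and `AutoImageProcessor`.
def _swinv2_label_sketch():
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, Swinv2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000), as asserted above
    return model.config.id2label[logits.argmax(-1).item()]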
| 345 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[Any] = (DPMSolverSDEScheduler,)
A__ : Tuple = 10
def A__ ( self: List[str] ,**lowerCamelCase_: Tuple ) -> str:
UpperCAmelCase_ : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**lowerCamelCase_ )
return config
def A__ ( self: str ) -> List[Any]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] ,[0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase_ ,beta_end=lowerCamelCase_ )
def A__ ( self: List[Any] ) -> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def A__ ( self: str ) -> Tuple:
UpperCAmelCase_ : int = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase_ : Optional[int] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase_ : Any = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : List[Any] = scheduler.scale_model_input(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = output.prev_sample
UpperCAmelCase_ : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1e-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
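        # The three branches above exist because, even with noise_sampler_seed fixed, each
        # backend's RNG stream yields a different (but per-device deterministic) trajectory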
def A__ ( self: str ) -> Tuple:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase_ : Any = self.dummy_model()
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase_ : Union[str, Any] = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Union[str, Any] = scheduler.scale_model_input(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = output.prev_sample
UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1e-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1e-3
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase_ : str = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps ,device=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : Optional[int] = self.dummy_sample_deter.to(lowerCamelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase_ : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = output.prev_sample
UpperCAmelCase_ : Dict = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1e-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowerCamelCase_ ,use_karras_sigmas=lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps ,device=lowerCamelCase_ )
UpperCAmelCase_ : Dict = self.dummy_model()
UpperCAmelCase_ : Any = self.dummy_sample_deter.to(lowerCamelCase_ ) * scheduler.init_noise_sigma
UpperCAmelCase_ : Optional[int] = sample.to(lowerCamelCase_ )
for t in scheduler.timesteps:
UpperCAmelCase_ : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Dict = output.prev_sample
UpperCAmelCase_ : List[str] = torch.sum(torch.abs(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
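# A minimal sketch of the same sampling loop outside the test harness (assumes `torchsde` is
# installed; the zero tensor stands in for a real denoising model, which is hypothetical here).
def _dpm_sde_loop_sketch():
    import torch
    from diffusers import DPMSolverSDEScheduler

    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample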
| 345 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''Returns a mapping from utf-8 bytes to printable unicode strings, making BPE vocabularies reversible.'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
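# e.g. the space byte (0x20) falls outside the printable ranges above, so it is remapped to
# chr(256 + 32) == "Ġ", which is why GPT-2-style BPE merges show "Ġ"-prefixed tokens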
def get_pairs(word ):
    '''Returns the set of adjacent symbol pairs in a word, where a word is a tuple of symbols.'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
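    # The loop above greedily applies the lowest-ranked merge from `bpe_ranks` until no known
    # bigram remains; the space-joined result is then cached per token before being returned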
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
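# A short out-of-suite usage sketch of the `_pad` override above (assumes Hub access;
# `LEDTokenizer` is the upstream name of the class defined here).
def _led_global_attention_sketch():
    from transformers import LEDTokenizer

    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tokenizer(["short", "a slightly longer sequence"])
    # Global attention on the first token of each sequence; when `pad` routes through `_pad`,
    # shorter rows are extended with -1, since 0 already means "local attention".
    enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
    return tokenizer.pad(enc, padding="longest")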
| 345 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[int] = BertJapaneseTokenizer
A__ : Any = False
A__ : Optional[Any] = True
def A__ ( self: List[Any] ) -> Optional[int]:
super().setUp()
UpperCAmelCase_ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、世界。"""
UpperCAmelCase_ : Optional[Any] = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def A__ ( self: Any ,lowerCamelCase_: Tuple ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_input_output_texts(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = tokenizer.decode(lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ )
return text, ids
def A__ ( self: Tuple ) -> int:
pass # TODO add if relevant
def A__ ( self: int ) -> Any:
pass # TODO add if relevant
def A__ ( self: Union[str, Any] ) -> Tuple:
pass # TODO add if relevant
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : int = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : Any = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(lowerCamelCase_ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""mecab""" )
self.assertIsNotNone(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(lowerCamelCase_ ,"""wb""" ) as handle:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
with open(lowerCamelCase_ ,"""rb""" ) as handle:
UpperCAmelCase_ : Optional[int] = pickle.load(lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ : str = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def A__ ( self: Union[str, Any] ) -> Dict:
try:
UpperCAmelCase_ : List[str] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def A__ ( self: Dict ) -> Optional[Any]:
try:
UpperCAmelCase_ : Any = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def A__ ( self: Tuple ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = MecabTokenizer(do_lower_case=lowerCamelCase_ ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def A__ ( self: Optional[int] ) -> List[Any]:
try:
UpperCAmelCase_ : List[str] = MecabTokenizer(
do_lower_case=lowerCamelCase_ ,normalize_text=lowerCamelCase_ ,mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
            # if the dictionary isn't installed on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : List[str] = MecabTokenizer(normalize_text=lowerCamelCase_ ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] ,)
@require_sudachi
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : Optional[int] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(lowerCamelCase_ ,"""wb""" ) as handle:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
with open(lowerCamelCase_ ,"""rb""" ) as handle:
UpperCAmelCase_ : int = pickle.load(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
@require_sudachi
def A__ ( self: List[Any] ) -> Tuple:
UpperCAmelCase_ : Any = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def A__ ( self: Optional[Any] ) -> int:
UpperCAmelCase_ : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人""", """参政権"""] )
@require_sudachi
def A__ ( self: int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人参政権"""] )
@require_sudachi
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = SudachiTokenizer(do_lower_case=lowerCamelCase_ ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def A__ ( self: Tuple ) -> Tuple:
UpperCAmelCase_ : int = SudachiTokenizer(normalize_text=lowerCamelCase_ ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] ,)
@require_sudachi
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : List[Any] = SudachiTokenizer(trim_whitespace=lowerCamelCase_ ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
@require_jumanpp
def A__ ( self: int ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(lowerCamelCase_ )
UpperCAmelCase_ : Any = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase_ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(lowerCamelCase_ ,"""wb""" ) as handle:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
with open(lowerCamelCase_ ,"""rb""" ) as handle:
UpperCAmelCase_ : Optional[int] = pickle.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
@require_jumanpp
def A__ ( self: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def A__ ( self: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = JumanppTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def A__ ( self: List[Any] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = JumanppTokenizer(normalize_text=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = JumanppTokenizer(trim_whitespace=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] ,)
@require_jumanpp
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) ,["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] ,)
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
UpperCAmelCase_ : Optional[Any] = {}
for i, token in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = i
UpperCAmelCase_ : int = WordpieceTokenizer(vocab=lowerCamelCase_ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) ,["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) ,["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
UpperCAmelCase_ : Optional[Any] = tokenizer.subword_tokenizer
UpperCAmelCase_ : Optional[Any] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(lowerCamelCase_ ,["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
UpperCAmelCase_ : str = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(lowerCamelCase_ ,["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def A__ ( self: int ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("""ありがとう。""" ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = tokenizer.encode("""どういたしまして。""" ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ,lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Union[str, Any] = BertJapaneseTokenizer
A__ : str = False
def A__ ( self: Optional[Any] ) -> Tuple:
super().setUp()
UpperCAmelCase_ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A__ ( self: Optional[Any] ,**lowerCamelCase_: List[str] ) -> List[Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type="""character""" ,**lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = """こんにちは、世界。 \nこんばんは、世界。"""
UpperCAmelCase_ : int = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def A__ ( self: Dict ) -> Dict:
pass # TODO add if relevant
def A__ ( self: Any ) -> Optional[int]:
pass # TODO add if relevant
def A__ ( self: List[Any] ) -> Tuple:
pass # TODO add if relevant
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type="""character""" )
UpperCAmelCase_ : int = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
lowerCamelCase_ ,["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A__ ( self: Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
UpperCAmelCase_ : Optional[int] = {}
for i, token in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : Dict = i
UpperCAmelCase_ : Tuple = CharacterTokenizer(vocab=lowerCamelCase_ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) ,["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
UpperCAmelCase_ : Any = tokenizer.encode("""ありがとう。""" ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode("""どういたしまして。""" ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ,lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Tuple ) -> Dict:
UpperCAmelCase_ : Optional[int] = """cl-tohoku/bert-base-japanese"""
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
UpperCAmelCase_ : Optional[Any] = """bert-base-cased"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 345 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
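        # `extract` stands in for a CLIP feature extractor: it returns an object whose
        # `pixel_values` is an empty tensor, which is all the pipeline plumbing needs in tests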
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # assemble the components into the safe pipeline (a DDIM scheduler is used in this test)
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance enabled (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
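# An out-of-suite sketch of the safety-guidance knobs exercised above (needs a GPU and Hub
# access; the checkpoint matches the one used by these nightly tests).
def _safe_sd_sketch():
    import torch
    from diffusers import StableDiffusionPipelineSafe

    pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
    return pipe(
        "a photograph of an astronaut riding a horse",  # hypothetical prompt, not from the tests
        generator=torch.manual_seed(0),
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    ).images[0]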
| 345 | 1 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    '''Checks whether two strings are anagrams, ignoring case and whitespace.'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each position, increment the count for the character from first_str
    # and decrement it for the character from second_str
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,next_sentence_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def A__ ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ) -> None:
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
    def A__ ( self ,inputs_dict ,model_class ,return_labels=False ) -> Union[str, Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
def A__ ( self: List[str] ) -> Any:
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MobileBertConfig ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
    def A__ ( self: List[str] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def A__ ( self: Optional[int] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def A__ ( self: Optional[Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def A__ ( self: List[Any] ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def A__ ( self: Optional[Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def A__ ( self: Optional[int] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def A__ ( self: Union[str, Any] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def A__ ( self: Any ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    '''Build a torch.long tensor on the test device from a nested list of token ids.'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
    def A__ ( self: List[Any] ) -> str:
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ] ,device=torch_device ,)
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
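# Illustrative sketch (not part of the original test suite; the helper name and
# signature are assumptions for illustration only): the ratio-based comparison
# above, factored into a reusable function. Because the activations span roughly
# 10e0 to 10e8, an absolute-difference check is meaningless at the large end,
# while checking that expected / actual stays inside [1 - tol, 1 + tol]
# element-wise is scale-invariant.
def _assert_relative_close( expected ,actual ,tol=TOLERANCE ):
    ratio = expected / actual
    assert torch.all(ratio >= 1 - tol ) and torch.all(ratio <= 1 + tol )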
| 345 | 1 |
def prime_sieve_eratosthenes( num : int ):
    '''Return all prime numbers up to and including num, using the Sieve of Eratosthenes.'''
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
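# Quick sanity check (illustrative): prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].
# Marking multiples from p * p upward gives the sieve its O(n log log n) running time.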
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 345 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        processor.save_pretrained(
            self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
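        # With these values the preset arrays have shapes (35,), (2, 35) and (8, 35):
        # one semantic token stream, plus one row per coarse / fine codebook.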
# test providing already loaded voice_preset
        inputs = processor(text=self.input_string ,voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname ,"""file.npz""" )
        np.savez(file_path ,**voice_preset )
        inputs = processor(text=self.input_string ,voice_preset=file_path )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
        inputs = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
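        # Worked example with the tester defaults above: image_size=32, patch_size=2
        # gives (32 // 2) ** 2 = 256 patches; len(depths) - 1 = 2 merge stages shrink
        # the sequence by 4 ** 2 = 16, down to 16 tokens, while the channel dimension
        # grows to embed_dim * 2 ** 2 = 64.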
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=SwinvaConfig ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width ).permute(0 ,2 ,1 )
        )
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
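        # The view + permute above flattens (batch, channels, height, width) back into
        # the regular (batch, height * width, channels) hidden-state layout, so the
        # trailing dimensions can be compared against the standard hidden states.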
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def A__ ( self: Union[str, Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
        model = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = image_processor(images=image ,return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids ,max_new_tokens=10 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,greedy_text )
def A__ ( self: Dict ) -> Optional[Any]:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        streamer_text = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text ,greedy_text )
def A__ ( self: List[Any] ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer ,skip_prompt=True )
            model.generate(input_ids ,max_new_tokens=10 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,new_greedy_text )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer ,timeout=0.0_0_1 )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = """"""
            for new_text in streamer:
                streamer_text += new_text
| 345 | 1 |
import random
from typing import Any
def fisher_yates_shuffle( data : list ) -> list[Any]:
    '''Shuffle data in place by repeatedly swapping two uniformly chosen positions.'''
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
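# Note (illustrative sketch, not used by the demo below): despite the name, the
# function above performs len(data) random transpositions rather than the textbook
# Fisher-Yates shuffle. The classic, provably uniform variant walks the list from
# the end and swaps each position with a uniformly chosen index at or before it:
def fisher_yates_shuffle_classic( data : list ) -> list[Any]:
    for i in range(len(data ) - 1 , 0 , -1 ):
        j = random.randint(0 , i )
        data[i], data[j] = data[j], data[i]
    return data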
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
def A__ ( self: str ) -> Optional[Any]:
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet ,vqvae=vae ,scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=2 ,output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 | 1 |
def topological_sort( graph : dict ):
    '''Kahn's algorithm: print a topological ordering of graph, or report a cycle.'''
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
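# For the graph above, the queue processes the vertices as 0, 1, 2, 3, 4, 5,
# so this prints [0, 1, 2, 3, 4, 5] (every node is reached, hence no cycle).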
| 345 |
def topological_sort( graph : dict ):
    '''Kahn's algorithm: print a topological ordering of graph, or report a cycle.'''
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 345 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path : str ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    '''simple docstring'''
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
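# Illustrative usage (hypothetical values): when several reference answers are
# acceptable, the best-matching one determines the score, e.g.
# metric_max_over_ground_truths(exact_match_score, "Paris", ["Paris", "City of Paris"]) == 1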
def get_scores( args , preds_path , gold_data_path ):
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="""\t""" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(F'''F1: {fa:.2f}''' )
    logger.info(F'''EM: {em:.2f}''' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("""\t""" )[:k] )
        ref_provenance = set(reference.split("""\t""" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''' )
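# Worked example: with k=2, a hypothesis line "a\tb" and a gold line "b\tc" share
# exactly one provenance title ({"a", "b"} & {"b", "c"} = {"b"}), so that pair
# contributes 1 / 2 = 0.5 to the running precision@k before the final averaging.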
def evaluate_batch_retrieval( args , rag_model , questions ):
    '''simple docstring'''
    def strip_title( title : str ):
        if title.startswith("""\"""" ):
            title = title[1:]
        if title.endswith("""\"""" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="""pt""" , padding=True , truncation=True , )["""input_ids"""].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance ) )
    return provenance_strings
def evaluate_batch_eae( args , rag_model , questions ):
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="""pt""" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("""Q: {} - A: {}""".format(q , a ) )
        return answers
def get_args():
'''simple docstring'''
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=_a , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=_a , choices=["""exact""", """compressed""", """legacy"""] , type=_a , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=_a , type=_a , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=_a , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=_a , type=_a , required=_a , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=_a , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=_a , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=_a , type=_a , required=_a , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=_a , type=_a , required=_a , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=_a , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=_a , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=_a , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=_a , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=_a , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=_a , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def main( args ):
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("""rag""" ):
        model_class = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        model_kwargs["""n_docs"""] = args.n_docs
        if args.index_name is not None:
            model_kwargs["""index_name"""] = args.index_name
        if args.index_path is not None:
            model_kwargs["""index_path"""] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("""Evaluate the following checkpoints: %s""" , checkpoints )
    score_fn = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("""***** Running evaluation for {} *****""".format(checkpoint ) )
        logger.info(""" Batch size = %d""" , args.eval_batch_size )
        logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
        if args.model_type.startswith("""rag""" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("""\n""".join(answers ) + """\n""" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("""\n""".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
    main(args)
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "swinv2"
A__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.0_2 ,layer_norm_eps=1e-5 ,encoder_stride=32 ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
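        # Worked example with the defaults: embed_dim=96 and depths=[2, 2, 6, 2] give
        # hidden_size = int(96 * 2 ** 3) = 768, the channel width after the final stage.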
| 345 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
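    # As the cases below exercise, is_safetensors_compatible is expected to return True
    # exactly when every PyTorch (.bin) weight file in the listing has a matching
    # .safetensors counterpart (per pipeline component, and per variant such as fp16),
    # so the whole pipeline could be loaded from safetensors alone.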
    def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
        filenames = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def A__ ( self: Union[str, Any] ) -> Dict:
        filenames = [
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def A__ ( self: str ) -> List[str]:
        filenames = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
            """unet/diffusion_pytorch_model.bin""",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def A__ ( self: List[str] ) -> Any:
        filenames = [
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def A__ ( self: int ) -> Dict:
        filenames = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            # Removed: 'text_encoder/model.safetensors',
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def A__ ( self: Optional[int] ) -> int:
        filenames = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames ,variant=variant ) )
    def A__ ( self: Tuple ) -> str:
        filenames = [
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames ,variant=variant ) )
    def A__ ( self: Optional[Any] ) -> Dict:
        # pass variant but use the non-variant filenames
        filenames = [
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames ,variant=variant ) )
    def A__ ( self: Dict ) -> Optional[Any]:
        filenames = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
            """unet/diffusion_pytorch_model.fp16.bin""",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = """fp16"""
        self.assertFalse(is_safetensors_compatible(filenames ,variant=variant ) )
    def A__ ( self: str ) -> Dict:
        filenames = [
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames ,variant=variant ) )
    def A__ ( self: List[Any] ) -> int:
        # pass variant but use the non-variant filenames
        filenames = [
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames ,variant=variant ) )
def A__ ( self: List[Any] ) -> List[Any]:
UpperCAmelCase_ : Dict = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
UpperCAmelCase_ : Any = """fp16"""
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ,variant=lowerCamelCase_ ) )
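# ---------------------------------------------------------------------------
# Illustrative sketch (added): the helper under test can also be called
# directly. It returns True only when every non-safetensors weight file has a
# safetensors counterpart (optionally for a given variant such as "fp16").
if __name__ == "__main__":
    filenames = [
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
    ]
    print(is_safetensors_compatible(filenames))  # True: the .bin has a twin
    print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False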
| 345 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
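# ---------------------------------------------------------------------------
# Illustrative sketch (added): outside of tests, `Trie` is the structure slow
# tokenizers use to split raw text on registered special tokens.
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add("[CLS]")
    demo_trie.add("[SEP]")
    print(demo_trie.split("[CLS] hello world [SEP]"))
    # -> ['[CLS]', ' hello world ', '[SEP]']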
| 345 | 1 |
import os
def lowerCamelCase_ ( _a : str = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(_a ) , _a ) ) as input_file:
UpperCAmelCase_ : List[Any] = [
[int(_a ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
UpperCAmelCase_ : int = len(_a )
UpperCAmelCase_ : Optional[int] = len(matrix[0] )
UpperCAmelCase_ : Any = [[-1 for _ in range(_a )] for _ in range(_a )]
for i in range(_a ):
UpperCAmelCase_ : Dict = matrix[i][0]
for j in range(1 , _a ):
for i in range(_a ):
UpperCAmelCase_ : Dict = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _a ):
UpperCAmelCase_ : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCAmelCase_ : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"{solution() = }")
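# ---------------------------------------------------------------------------
# Illustrative worked example (added): the 5x5 grid from the Project Euler 82
# statement, solved with the same column-by-column dynamic programme as
# `solution` above but on an in-memory matrix. The known minimum is 994, via
# 201 -> 96 -> 342 -> 234 -> 103 -> 18.
def _minimal_path_sum_demo(matrix):
    rows, cols = len(matrix), len(matrix[0])
    sums = [row[0] for row in matrix]  # cost of entering each row in column 0
    for j in range(1, cols):
        new = [sums[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):  # sweep down
            new[i] = min(new[i], new[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # sweep up
            new[i] = min(new[i], new[i + 1] + matrix[i][j])
        sums = new
    return min(sums)


assert _minimal_path_sum_demo(
    [
        [131, 673, 234, 103, 18],
        [201, 96, 342, 965, 150],
        [630, 803, 746, 422, 111],
        [537, 699, 497, 121, 956],
        [805, 732, 524, 37, 331],
    ]
) == 994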
| 345 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
| 345 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Dict = ["input_features", "attention_mask"]
def __init__( self: Any ,lowerCamelCase_: Union[str, Any]=80 ,lowerCamelCase_: List[Any]=16000 ,lowerCamelCase_: Optional[Any]=80 ,lowerCamelCase_: Dict=0.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Any=True ,**lowerCamelCase_: List[str] ,) -> List[str]:
super().__init__(feature_size=lowerCamelCase_ ,sampling_rate=lowerCamelCase_ ,padding_value=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = num_mel_bins
UpperCAmelCase_ : str = do_ceptral_normalize
UpperCAmelCase_ : Optional[Any] = normalize_means
UpperCAmelCase_ : List[Any] = normalize_vars
UpperCAmelCase_ : Dict = True
def A__ ( self: str ,lowerCamelCase_: np.ndarray ,) -> np.ndarray:
UpperCAmelCase_ : Tuple = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCAmelCase_ : List[Any] = torch.from_numpy(lowerCamelCase_ ).unsqueeze(0 )
UpperCAmelCase_ : str = ta_kaldi.fbank(lowerCamelCase_ ,num_mel_bins=self.num_mel_bins ,sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A__ ( lowerCamelCase_: np.ndarray ,lowerCamelCase_: int ,lowerCamelCase_: Optional[bool] = True ,lowerCamelCase_: Optional[bool] = True ,lowerCamelCase_: float = 0.0 ,) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
UpperCAmelCase_ : Optional[Any] = x[:input_length].mean(axis=0 )
UpperCAmelCase_ : Any = np.subtract(lowerCamelCase_ ,lowerCamelCase_ )
if normalize_vars:
UpperCAmelCase_ : List[Any] = x[:input_length].std(axis=0 )
UpperCAmelCase_ : List[str] = np.divide(lowerCamelCase_ ,lowerCamelCase_ )
if input_length < x.shape[0]:
UpperCAmelCase_ : str = padding_value
# make sure array is in float32
UpperCAmelCase_ : List[str] = x.astype(np.floataa )
return x
def A__ ( self: List[Any] ,lowerCamelCase_: List[np.ndarray] ,lowerCamelCase_: Optional[np.ndarray] = None ) -> List[np.ndarray]:
UpperCAmelCase_ : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCamelCase_ ,lowerCamelCase_ ,self.normalize_means ,self.normalize_vars ,self.padding_value )
for x, n in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
def __call__( self: str ,lowerCamelCase_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase_: Union[bool, str, PaddingStrategy] = False ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,**lowerCamelCase_: Optional[Any] ,) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase_ : int = isinstance(lowerCamelCase_ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase_ : Union[str, Any] = is_batched_numpy or (
isinstance(lowerCamelCase_ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ : int = [np.asarray(lowerCamelCase_ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ ,np.ndarray ):
UpperCAmelCase_ : Tuple = np.asarray(lowerCamelCase_ ,dtype=np.floataa )
elif isinstance(lowerCamelCase_ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ : List[str] = [raw_speech]
# extract fbank features
UpperCAmelCase_ : int = [self._extract_fbank_features(lowerCamelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase_ : Optional[Any] = BatchFeature({"""input_features""": features} )
UpperCAmelCase_ : Optional[int] = self.pad(
lowerCamelCase_ ,padding=lowerCamelCase_ ,max_length=lowerCamelCase_ ,truncation=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,**lowerCamelCase_ ,)
# make sure list is in array format
UpperCAmelCase_ : Optional[int] = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,lowerCamelCase_ ):
UpperCAmelCase_ : Dict = [np.asarray(lowerCamelCase_ ,dtype=np.floataa ) for feature in input_features]
UpperCAmelCase_ : Tuple = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCAmelCase_ : Tuple = [np.asarray(lowerCamelCase_ ,dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCAmelCase_ : List[Any] = (
np.array(lowerCamelCase_ ,dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_ ,max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCAmelCase_ : Union[str, Any] = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=lowerCamelCase_ )
if return_tensors is not None:
UpperCAmelCase_ : Tuple = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
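# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; assumes the class above is
# `Speech2TextFeatureExtractor` from upstream `transformers`). Requires
# `torchaudio` for the Kaldi fbank computation.
if __name__ == "__main__":
    import numpy as np
    from transformers import Speech2TextFeatureExtractor

    extractor = Speech2TextFeatureExtractor(
        feature_size=80, sampling_rate=16000, num_mel_bins=80
    )
    waveform = (np.random.randn(16000) * 0.1).astype(np.float32)  # ~1 s of noise
    inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_features"].shape)  # (1, num_frames, 80)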
| 345 |
import random
from typing import Any
def lowerCamelCase_ ( _a : list ):
'''simple docstring'''
    # Canonical Fisher-Yates: walk from the last index down and swap each
    # element with a uniformly chosen element at or before it, which yields an
    # unbiased permutation.
    for i in range(len(_a ) - 1 , 0 , -1 ):
        UpperCAmelCase_ : Tuple = random.randint(0 , i )
        UpperCAmelCase_ , UpperCAmelCase_ : int = data[j], data[i]
return data
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 345 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def A__ ( self: str ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: Union[torch.Tensor, float, int] ,lowerCamelCase_: torch.Tensor ,lowerCamelCase_: List[torch.tensor] ,lowerCamelCase_: List[float] ,lowerCamelCase_: Optional[torch.Tensor] = None ,lowerCamelCase_: Optional[torch.Tensor] = None ,lowerCamelCase_: Optional[torch.Tensor] = None ,lowerCamelCase_: Optional[Dict[str, Any]] = None ,lowerCamelCase_: bool = False ,lowerCamelCase_: bool = True ,) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ ,lowerCamelCase_ ,self.nets ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = controlnet(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,)
# merge samples
if i == 0:
UpperCAmelCase_ , UpperCAmelCase_ : str = down_samples, mid_sample
else:
UpperCAmelCase_ : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A__ ( self: List[str] ,lowerCamelCase_: Union[str, os.PathLike] ,lowerCamelCase_: bool = True ,lowerCamelCase_: Callable = None ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[str] = None ,) -> Dict:
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ ,is_main_process=lowerCamelCase_ ,save_function=lowerCamelCase_ ,safe_serialization=lowerCamelCase_ ,variant=lowerCamelCase_ ,)
idx += 1
UpperCAmelCase_ : List[Any] = model_path_to_save + F'''_{idx}'''
@classmethod
def A__ ( cls: Tuple ,lowerCamelCase_: Optional[Union[str, os.PathLike]] ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCAmelCase_ : Any = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
UpperCAmelCase_ : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
UpperCAmelCase_ : List[str] = pretrained_model_path + F'''_{idx}'''
logger.info(F'''{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
F'''No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowerCamelCase_ )
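# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; assumes the class above is
# `MultiControlNetModel` from upstream `diffusers`). The repo ids are real but
# downloading them needs network access.
if __name__ == "__main__":
    from diffusers import ControlNetModel
    from diffusers.pipelines.controlnet import MultiControlNetModel

    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
    multi = MultiControlNetModel([canny, pose])
    # save_pretrained writes the nets to multi-controlnet/ and multi-controlnet_1/
    multi.save_pretrained("multi-controlnet")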
| 345 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
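# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; assumes the plain down block above is
# `FlaxDownBlock2D` from upstream `diffusers`). Flax modules are initialised
# lazily from example inputs, which use NHWC layout.
if __name__ == "__main__":
    import jax
    from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D

    block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=1)
    sample = jnp.zeros((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time embedding
    params = block.init(jax.random.PRNGKey(0), sample, temb)
    hidden, skips = block.apply(params, sample, temb)
    print(hidden.shape)  # (1, 4, 4, 64) after the stride-2 downsample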
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
    '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : int = "falcon"
A__ : int = ["past_key_values"]
def __init__( self: Optional[Any] ,lowerCamelCase_: List[Any]=65024 ,lowerCamelCase_: Optional[Any]=4544 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: Optional[Any]=71 ,lowerCamelCase_: str=1e-5 ,lowerCamelCase_: str=0.0_2 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: str=0.0 ,lowerCamelCase_: Any=None ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: str=False ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: Dict=11 ,lowerCamelCase_: List[str]=11 ,**lowerCamelCase_: Optional[int] ,) -> str:
UpperCAmelCase_ : Dict = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("""n_embed""" ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : int = use_cache
UpperCAmelCase_ : int = hidden_dropout
UpperCAmelCase_ : str = attention_dropout
UpperCAmelCase_ : int = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
UpperCAmelCase_ : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : List[str] = alibi
UpperCAmelCase_ : Optional[int] = new_decoder_architecture
UpperCAmelCase_ : Optional[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : List[Any] = parallel_attn
UpperCAmelCase_ : Optional[int] = bias
super().__init__(bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
@property
def A__ ( self: List[Any] ) -> Tuple:
return self.hidden_size // self.num_attention_heads
@property
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
return not self.alibi
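# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; assumes the class above is `FalconConfig`
# from upstream `transformers`).
if __name__ == "__main__":
    from transformers import FalconConfig

    config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
    print(config.head_dim)  # 4544 // 71 == 64
    print(config.rotary)  # True, because alibi is disabled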
| 345 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
        # modify model parameters
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
        # calculate the feature map of every single kernel, saved as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
        # expanding the data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # expanding three-dimensional data to a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
        # expanding a matrix to a one-dimensional list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error over all images in this epoch
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
        print("""------------------Training Completed---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
        # model prediction
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution process so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
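# ---------------------------------------------------------------------------
# Illustrative sanity check (added): the shapes used throughout `convolute` and
# `pooling` follow the valid-convolution size formula.
def _feature_map_size(size_data: int, size_conv: int, conv_step: int) -> int:
    return (size_data - size_conv) // conv_step + 1


assert _feature_map_size(28, 3, 1) == 26  # 28x28 input, 3x3 kernel, stride 1
assert _feature_map_size(26, 2, 2) == 13  # the same formula covers 2x2 pooling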
| 345 | 1 |
UpperCamelCase_ = 256
# Modulus to hash a string
UpperCamelCase_ = 1000003
def lowerCamelCase_ ( _a : str , _a : str ):
'''simple docstring'''
UpperCAmelCase_ : Any = len(_a )
UpperCAmelCase_ : List[Any] = len(_a )
if p_len > t_len:
return False
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Optional[int] = 1
# Calculating the hash of pattern and substring of text
for i in range(_a ):
UpperCAmelCase_ : Dict = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
UpperCAmelCase_ : Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
UpperCAmelCase_ : Union[str, Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
UpperCAmelCase_ : Union[str, Any] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
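# Worked example of the rolling-hash update above (a sketch; the real run uses
# alphabet_size = 256 and modulus = 1000003). For a window of length 2 over
# "abc", the hash of "ab" is (ord("a") * 256 + ord("b")) % modulus and
# modulus_power is 256, so sliding to "bc" costs O(1):
#     new_hash = ((old_hash - ord("a") * 256) * 256 + ord("c")) % modulus
# which equals rehashing "bc" from scratch.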
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """abc1abc12"""
UpperCAmelCase_ : Optional[int] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase_ : str = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(_a , _a ) and not rabin_karp(_a , _a )
# Test 2)
UpperCAmelCase_ : List[Any] = """ABABX"""
UpperCAmelCase_ : int = """ABABZABABYABABX"""
assert rabin_karp(_a , _a )
# Test 3)
UpperCAmelCase_ : Tuple = """AAAB"""
UpperCAmelCase_ : Dict = """ABAAAAAB"""
assert rabin_karp(_a , _a )
# Test 4)
UpperCAmelCase_ : Union[str, Any] = """abcdabcy"""
UpperCAmelCase_ : Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(_a , _a )
# Test 5)
UpperCAmelCase_ : Dict = """Lü"""
UpperCAmelCase_ : Tuple = """Lüsai"""
assert rabin_karp(_a , _a )
UpperCAmelCase_ : Any = """Lue"""
assert not rabin_karp(_a , _a )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 345 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
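# BPE walk-through for the expected tokens above: with the toy merges
# ("a p", "ap t</w>", "r e", "a d", "ad apt</w>"), "readapt" merges all the way
# down to "re@@ adapt", while "react" only applies "r e" and is left as
# "re@@ a@@ c@@ t".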
| 345 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
| 345 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
| 345 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = torch.device('''cpu''')
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase_ : List[Any] = Image.open(requests.get(_a , stream=_a ).raw )
return im
def lowerCamelCase_ ( _a : Tuple ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def lowerCamelCase_ ( _a : str , _a : Tuple , _a : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = dct.pop(_a )
UpperCAmelCase_ : Tuple = val
def lowerCamelCase_ ( _a : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = []
for k in state_dict.keys():
UpperCAmelCase_ : Optional[int] = k
if ".pwconv" in k:
UpperCAmelCase_ : Any = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCAmelCase_ : Any = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCAmelCase_ : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCAmelCase_ : Any = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCAmelCase_ : Tuple = k_new.split(""".""" )
if ls[2].isdigit():
UpperCAmelCase_ : List[str] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCAmelCase_ : int = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
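# Illustrative mapping produced by the rules above (key names are assumptions
# based on the matched patterns, not read from an actual checkpoint):
#     "network.0.1.dwconv.weight"
#         -> "swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight"
#     "patch_embed.0.weight"
#         -> "swiftformer.patch_embed.patch_embedding.0.weight"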
@torch.no_grad()
def lowerCamelCase_ ( _a : Optional[int] , _a : List[Any] , _a : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = SwiftFormerConfig()
# prepare the ImageNet-1k id2label mapping used by the classification head
UpperCAmelCase_ : List[str] = 1000
UpperCAmelCase_ : Tuple = """huggingface/label-files"""
UpperCAmelCase_ : Optional[int] = """imagenet-1k-id2label.json"""
UpperCAmelCase_ : Dict = json.load(open(hf_hub_download(_a , _a , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase_ : Optional[Any] = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase_ : str = [3, 3, 6, 4]
UpperCAmelCase_ : Union[str, Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase_ : Dict = [3, 3, 9, 6]
UpperCAmelCase_ : Tuple = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase_ : List[str] = [4, 3, 10, 5]
UpperCAmelCase_ : List[str] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase_ : Any = [4, 4, 12, 6]
UpperCAmelCase_ : str = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(_a , map_location="""cpu""" , check_hash=_a )
else:
UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
UpperCAmelCase_ : Tuple = checkpoint
UpperCAmelCase_ : Union[str, Any] = create_rename_keys(_a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_a , rename_key_src , rename_key_dest )
# load HuggingFace model
UpperCAmelCase_ : Optional[Any] = SwiftFormerForImageClassification(_a ).eval()
hf_model.load_state_dict(_a )
# prepare test inputs
UpperCAmelCase_ : str = prepare_img()
UpperCAmelCase_ : Dict = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
UpperCAmelCase_ : List[str] = processor(images=_a , return_tensors="""pt""" )
# compare outputs from both models
UpperCAmelCase_ : Any = get_expected_output(_a )
UpperCAmelCase_ : Tuple = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , _a , atol=1E-3 )
Path(_a ).mkdir(exist_ok=_a )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(_a )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
UpperCamelCase_ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 345 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = text.split(_a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )]
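# Sketch of the intended behavior, with a window of 2 words instead of the
# default 100: split_text("a b c d e", 2) == ["a b", "c d", "e"]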
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
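# Query-time sketch (using the datasets FAISS API on the index built above):
#     scores, examples = dataset.get_nearest_examples("embeddings", q, k=5)
# where q is a (768,)-dim DPR question embedding; the call returns the 5
# passages with the highest inner-product scores.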
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCamelCase_ ( _a : str , _a : float | Decimal , _a : float = 10**-10 ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = a
while True:
UpperCAmelCase_ : Optional[int] = Decimal(_a ) - (
Decimal(eval(_a ) ) / Decimal(eval(str(diff(_a ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_a ) ) < precision: # noqa: S307
return float(_a )
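# One hand-computed step of the update x <- x - f(x)/f'(x), for f(x) = x**2 - 2
# starting at x = 1.5 (an illustration, not produced by the code above):
#     f(1.5) = 0.25 and f'(1.5) = 3.0, so x becomes 1.5 - 0.25/3.0 ~= 1.41667,
# already close to sqrt(2) ~= 1.41421.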
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find root of logarithm
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 345 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For simplicity, we backprop on the
# mean of (out - labels) rather than computing a full training loss
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For simplicity, we backprop on the
# mean of (out - labels) rather than computing a full training loss
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
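# Note: gradient checkpointing trades compute for memory by re-running each
# checkpointed block's forward pass during backward, so the gradients above
# should match the non-checkpointed baseline within the 5e-5 tolerance.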
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
| 345 | 1 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase_ = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCamelCase_ ( _a : List[Any] , _a : List[Any] ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.0_1
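# e.g. is_apercent_close(995_000, 1_000_000) is True (0.5% apart), while
# is_apercent_close(980_000, 1_000_000) is False (2% apart).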
@pytest.mark.integration
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = _TestCommandArgs(dataset=_a , all_configs=_a , save_infos=_a )
UpperCAmelCase_ : Tuple = TestCommand(*_a )
test_command.run()
UpperCAmelCase_ : Optional[int] = os.path.join(_a , """README.md""" )
assert os.path.exists(_a )
UpperCAmelCase_ : Optional[Any] = DatasetInfosDict.from_directory(_a )
UpperCAmelCase_ : Tuple = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 235_1563,
"""num_examples""": 1_0000,
},
{
"""name""": """validation""",
"""num_bytes""": 23_8418,
"""num_examples""": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
if key == "num_bytes":
assert is_apercent_close(_a , _a )
elif key == "splits":
assert list(_a ) == list(_a )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 345 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"audio": Audio()} )
A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
A__ : str = "audio"
A__ : str = "transcription"
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
UpperCAmelCase_ : Any = features[self.audio_column]
UpperCAmelCase_ : Union[str, Any] = input_schema
return task_template
@property
def A__ ( self: List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 345 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase_ = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[str]=False ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: List[Any]="<s>" ,lowerCamelCase_: Dict="</s>" ,lowerCamelCase_: Any="<unk>" ,lowerCamelCase_: Dict="<sep>" ,lowerCamelCase_: List[Any]="<pad>" ,lowerCamelCase_: List[str]="<cls>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[Any]=["<eop>", "<eod>"] ,lowerCamelCase_: Optional[Dict[str, Any]] = None ,**lowerCamelCase_: Optional[int] ,) -> None:
UpperCAmelCase_ : Tuple = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ ,remove_space=lowerCamelCase_ ,keep_accents=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,additional_special_tokens=lowerCamelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase_ ,)
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : Tuple = do_lower_case
UpperCAmelCase_ : Tuple = remove_space
UpperCAmelCase_ : Dict = keep_accents
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
UpperCAmelCase_ : Any = jieba
UpperCAmelCase_ : Tuple = str.maketrans(""" \n""" ,"""\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def A__ ( self: Dict ) -> Tuple:
return len(self.sp_model )
def A__ ( self: Tuple ) -> str:
UpperCAmelCase_ : str = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Dict = self.__dict__.copy()
UpperCAmelCase_ : List[str] = None
return state
def __setstate__( self: Optional[Any] ,lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self: Optional[Any] ,lowerCamelCase_: int ) -> List[Any]:
if self.remove_space:
UpperCAmelCase_ : Any = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase_ : List[str] = inputs
UpperCAmelCase_ : Optional[int] = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" )
if not self.keep_accents:
UpperCAmelCase_ : List[str] = unicodedata.normalize("""NFKD""" ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
UpperCAmelCase_ : int = outputs.lower()
return outputs
def A__ ( self: Any ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Dict = self.preprocess_text(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.sp_model.encode(lowerCamelCase_ ,out_type=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = []
for piece in pieces:
if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase_ : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,"""""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase_ : Union[str, Any] = cur_pieces[1:]
else:
UpperCAmelCase_ : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(cur_pieces )
else:
new_pieces.append(piece )
return new_pieces
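# Digit/comma handling sketch: a piece such as "2014," (trailing comma after a
# digit) is re-encoded without the comma and the comma re-appended, yielding
# e.g. ["2014", ","] so numbers and punctuation end up as separate tokens
# (illustrative values; the actual pieces depend on the SentencePiece model).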
def A__ ( self: str ,lowerCamelCase_: str ) -> Any:
return self.sp_model.PieceToId(lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: int ) -> List[Any]:
return self.sp_model.IdToPiece(lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: str ) -> str:
UpperCAmelCase_ : Optional[Any] = """""".join(lowerCamelCase_ ).replace(lowerCamelCase_ ,""" """ ).strip()
return out_string
def A__ ( self: Tuple ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A__ ( self: Tuple ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1]
return ([0] * len(lowerCamelCase_ )) + [1, 1]
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Any = [self.sep_token_id]
UpperCAmelCase_ : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Tuple = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ ,"""wb""" ) as fi:
UpperCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
def A__ ( self: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ) -> str:
UpperCAmelCase_ : List[str] = super()._decode(*lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = text.replace(""" """ ,"""""" ).replace("""\u2582""" ,""" """ ).replace("""\u2583""" ,"""\n""" )
return text
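# This undoes the jieba-stage translation set up in __init__: "\u2582" stood in
# for a space and "\u2583" for a newline inside the tokenized text.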
| 345 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "layoutlmv3"
def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]:
super().__init__(
vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : Optional[int] = shape_size
UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
UpperCAmelCase_ : Optional[int] = rel_pos_bins
UpperCAmelCase_ : Union[str, Any] = max_rel_pos
UpperCAmelCase_ : Dict = has_spatial_attention_bias
UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
UpperCAmelCase_ : Tuple = max_rel_ad_pos
UpperCAmelCase_ : Union[str, Any] = text_embed
UpperCAmelCase_ : Optional[Any] = visual_embed
UpperCAmelCase_ : List[str] = input_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = classifier_dropout
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = version.parse("1.12" )
@property
def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
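# Usage sketch (variable names assumed, not from this file): given a LayoutLMv3
# processor bundling an image processor and a tokenizer, calling the method
# above with batch_size=-1 and seq_length=-1 yields fixed-size dummy
# "input_ids", "bbox", "attention_mask" and "pixel_values" suitable for ONNX
# export tracing.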
| 345 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For simplicity, we backprop on the
# mean of (out - labels) rather than computing a full training loss
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For simplicity, we backprop on the
# mean of (out - labels) rather than computing a full training loss
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
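        # Deterministic test input: rather than sampling noise locally, this helper
        # loads pre-generated gaussian noise from the Hub (keyed by seed and shape
        # via get_file_format above), so the expected slices stay reproducible
        # across runs and machines.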
        UpperCAmelCase_ : Tuple = torch.float16 if fpaa else torch.float32
        UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(torch_device ).to(dtype )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
        UpperCAmelCase_ : str = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
            lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=torch_dtype ,revision=revision ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
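        # Device-specific torch.Generator objects are not generally available for
        # "mps", so on that backend we fall back to seeding the global RNG instead
        # (hence the branch below).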
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
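        # The SD VAE encodes pixels into a 4-channel latent at 1/8 the spatial
        # resolution, which is exactly what the shape assertion below checks.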
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
| 345 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
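    # Weight tying: build a bias-free Linear whose weight is the shared token
    # embedding matrix, so the LM output projection reuses the input embeddings.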
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape
    UpperCAmelCase_ : Tuple = nn.Linear(vocab_size , emb_size , bias=False )
UpperCAmelCase_ : List[Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] )
UpperCAmelCase_ : Optional[int] = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]
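    # fairseq stores decoder weights under a "decoder.*" prefix; the HF XGLM
    # implementation expects them under "model.*", so rename every key.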
UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
UpperCAmelCase_ : int = XGLMConfig(
vocab_size=_a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    UpperCAmelCase_ : List[str] = XGLMForCausalLM(config )
    UpperCAmelCase_ : Tuple = model.load_state_dict(state_dict , strict=False )
    print(missing )
UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
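# Illustrative CLI usage (the script name and paths are hypothetical, not taken
# from this file):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-dump
# The converted weights can then be reloaded with
#   XGLMForCausalLM.from_pretrained("./xglm-dump")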
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 345 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Optional[int] = 35
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Dict = 8
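        # A Bark voice preset bundles three prompt arrays (dummy values below):
        # a 1-D semantic prompt, a coarse prompt with one row per coarse codebook,
        # and a fine prompt with one row per codebook in total.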
UpperCAmelCase_ : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
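        # After patch embedding, each subsequent stage merges 2x2 patches: the
        # sequence length shrinks by a factor of 4 per stage while the channel
        # width doubles, hence the expected shape computed below.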
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
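        # Stage-0 hidden states contain one token per non-overlapping patch:
        # (H // patch_h) * (W // patch_w) tokens, each of width embed_dim.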
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
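        # Round each spatial dim up to the next multiple of the patch size (the
        # formula adds a full extra patch when the size already divides evenly),
        # then check hidden-state shapes against the padded resolution.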
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345 | 1 |
UpperCamelCase_ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
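# Import pattern used throughout this file: probe each optional backend and, if
# it is missing, import "dummy" placeholder objects in its place. The dummies
# raise an informative error only when instantiated, so `import diffusers`
# itself never fails on a partial install.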
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 345 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
        return Swinv2Config(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
        UpperCAmelCase_ : str = Swinv2Model(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
        UpperCAmelCase_ : Any = Swinv2ForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
        UpperCAmelCase_ : Optional[Any] = Swinv2ForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
        UpperCAmelCase_ : int = Swinv2ForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
        UpperCAmelCase_ : Any = Swinv2ModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
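            # Swin-style attention is computed within local windows, so each
            # attention map is (num_heads, window_size**2, window_size**2) rather
            # than spanning the full token sequence.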
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Dict = Swinv2Model.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
        UpperCAmelCase_ : Tuple = Swinv2ForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
| 345 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : int = "M-CLIP"
def __init__( self: Any ,lowerCamelCase_: Optional[Any]=1024 ,lowerCamelCase_: List[str]=768 ,**lowerCamelCase_: str ) -> List[Any]:
UpperCAmelCase_ : List[Any] = transformerDimSize
UpperCAmelCase_ : str = imageDimSize
super().__init__(**lowerCamelCase_ )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : int = MCLIPConfig
def __init__( self: str ,lowerCamelCase_: Tuple ,*lowerCamelCase_: Any ,**lowerCamelCase_: List[Any] ) -> List[str]:
super().__init__(lowerCamelCase_ ,*lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Any = XLMRobertaModel(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.nn.Linear(
in_features=config.transformerDimensions ,out_features=config.numDims )
def A__ ( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.transformer(input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0]
UpperCAmelCase_ : Tuple = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowerCamelCase_ ), embs
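# --- Illustrative sketch (not part of the model above) ---
# Masked mean pooling as used in `forward`: zero out padded positions, sum over
# the sequence dimension, and divide by the number of real tokens per example.
# The shapes below are illustrative only.
import torch
embs = torch.randn(2, 5, 8)  # (batch, seq, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)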
| 345 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_ : Dict = bs[:]
UpperCAmelCase_ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_a )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ : Any = [chr(_a ) for n in cs]
return dict(zip(_a , _a ) )
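# --- Illustrative sketch (not part of the tokenizer file above) ---
# `bytes_to_unicode` maps every byte value to a printable unicode character so
# byte-level BPE never has to handle raw whitespace or control bytes. A compact,
# runnable restatement of the same mapping, using only the stdlib:
def demo_bytes_to_unicode() -> dict:
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs, n = bs[:], 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))
assert len(demo_bytes_to_unicode()) == 256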
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
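# --- Illustrative sketch (not part of the tokenizer file above) ---
# `get_pairs` enumerates adjacent symbol pairs, which is exactly what the BPE
# merge table ranks. An equivalent one-liner for reference:
def demo_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}
assert demo_get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}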
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
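# --- Illustrative sketch (not part of the tokenizer file above) ---
# The `_pad` override extends LED's `global_attention_mask` with -1 so that it
# matches the padded `input_ids` length; -1 means "local attention", not
# "do not attend". Illustrative values with right-side padding:
encoded = {"input_ids": [0, 9, 8, 2, 1, 1], "global_attention_mask": [1, 0, 0, 0]}
difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
assert len(encoded["global_attention_mask"]) == len(encoded["input_ids"])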
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
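# --- Illustrative sketch (not part of the module above) ---
# Each try/except block above gates a backend behind an availability probe. A
# minimal restatement of the pattern, with a hypothetical `_probe` standing in
# for `is_torch_available` and friends:
import importlib.util
def _probe(name: str) -> bool:
    return importlib.util.find_spec(name) is not None
_structure = {"configuration_foo": ["FooConfig"]}
if _probe("torch"):
    _structure["modeling_foo"] = ["FooModel"]  # registered only when torch is importable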
| 345 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # assemble the safe pipeline (this test uses the DDIM scheduler configured above)
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
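# --- Illustrative sketch (not part of the test file above) ---
# The pipeline tests above are slice-based regression checks: only a 3x3 corner
# of one channel is compared against hard-coded reference values under a loose
# absolute tolerance. The same pattern in isolation:
import numpy as np
image = np.zeros((1, 512, 512, 3))
image[0, -3:, -3:, -1] = 0.2278
expected_slice = np.full(9, 0.2278)
assert np.abs(image[0, -3:, -3:, -1].flatten() - expected_slice).max() < 1e-2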
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
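# --- Illustrative sketch (not part of the module above) ---
# `_LazyModule` defers submodule imports until a symbol is first accessed. A
# simplified stand-in using PEP 562 module-level __getattr__ (this is not the
# actual transformers implementation, and the relative import assumes package context):
import importlib
_LAZY_ATTRS = {"WhisperConfig": ".configuration_whisper"}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")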
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[str] = MobileBertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
return torch.tensor(
_a , dtype=torch.long , device=_a , )
UpperCamelCase_ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0]
UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = torch.tensor(
[
[
[-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
[-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
[2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
]
] ,device=lowerCamelCase_ ,)
        # MobileBERT outputs range from 10e0 to 10e8. Even a 0.0000001% difference on a value of 10e8 yields an
        # absolute difference of ~1, so it is not a good idea to measure with an absolute tolerance.
        # Instead, we divide the expected result by the actual result to obtain a ratio of ~1, and check that this
        # ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
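# --- Illustrative sketch (not part of the test file above) ---
# Ratio-bound comparison for outputs spanning many orders of magnitude: check
# that expected/actual stays within 1 +/- TOLERANCE instead of using an
# absolute difference, which the largest entries would dominate.
import torch
TOL = 1e-3
expected = torch.tensor([-2.47e7, 8.27e4, 1.65e5])
actual = expected * 1.0001  # a 0.01% relative deviation
ratio = expected / actual
assert bool(torch.all(ratio >= 1 - TOL) and torch.all(ratio <= 1 + TOL))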
| 345 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=2 ,lowerCamelCase_: Union[str, Any]=3 ,lowerCamelCase_: str=4 ,lowerCamelCase_: int=2 ,lowerCamelCase_: Optional[Any]=7 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: List[Any]=99 ,lowerCamelCase_: Dict=36 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: Optional[int]=37 ,lowerCamelCase_: List[str]="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: int=512 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=6 ,lowerCamelCase_: Any=6 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: int=4 ,lowerCamelCase_: Any=None ,lowerCamelCase_: Optional[int]=1000 ,) -> List[Any]:
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : int = patch_size
UpperCAmelCase_ : Dict = text_seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : List[Any] = use_input_mask
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : int = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : List[str] = coordinate_size
UpperCAmelCase_ : Tuple = shape_size
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_choices
UpperCAmelCase_ : str = scope
UpperCAmelCase_ : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase_ : List[str] = text_seq_length
UpperCAmelCase_ : List[str] = (image_size // patch_size) ** 2 + 1
UpperCAmelCase_ : List[str] = self.text_seq_length + self.image_seq_length
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : Any = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase_ : Union[str, Any] = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase_ : Any = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
UpperCAmelCase_ : int = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Tuple ) -> str:
UpperCAmelCase_ : Any = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,pixel_values=lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(
lowerCamelCase_ ,bbox=lowerCamelCase_ ,pixel_values=lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,bbox=lowerCamelCase_ ,pixel_values=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : str = model(lowerCamelCase_ ,bbox=lowerCamelCase_ ,pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase_ : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def A__ ( self: Optional[int] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ) -> Dict:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,bbox=lowerCamelCase_ ,pixel_values=lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.num_labels
UpperCAmelCase_ : Any = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
lowerCamelCase_ ,bbox=lowerCamelCase_ ,pixel_values=lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> Any:
UpperCAmelCase_ : Optional[Any] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,bbox=lowerCamelCase_ ,pixel_values=lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: str ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : str = False
A__ : List[Any] = False
A__ : Optional[int] = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def A__ ( self: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Union[str, Any]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
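        # A rough shape sketch of that mismatch (sizes are illustrative, not
        # taken from this test): with 7 text tokens and 197 visual patches,
        #   embedding_output:    (batch, 7, hidden)
        #   visual_embeddings:   (batch, 197, hidden)
        #   after the dim=1 cat: (batch, 204, hidden),
        # while `p_mask` still has length 7, so post-processing indexes past
        # the text sequence.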
return True
def A__ ( self: Optional[int] ) -> Tuple:
UpperCAmelCase_ : Dict = LayoutLMvaModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int]=False ) -> Tuple:
UpperCAmelCase_ : Any = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : str = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(lowerCamelCase_ ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
UpperCAmelCase_ : List[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
UpperCAmelCase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=lowerCamelCase_ ,)
return inputs_dict
def A__ ( self: List[str] ) -> int:
self.config_tester.run_common_tests()
def A__ ( self: Optional[int] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> Optional[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Union[str, Any] = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> str:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def A__ ( self: Optional[int] ) -> int:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: List[Any] ) -> Optional[Any]:
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def A__ ( self: Optional[int] ) -> str:
        model = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image ,return_tensors="""pt""" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) ,bbox=bbox.to(torch_device ) ,pixel_values=pixel_values.to(torch_device ) ,)
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,expected_slice ,atol=1e-4 ) )
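        # Sanity check on the 199 positions asserted above: 2 text tokens plus
        # 197 visual tokens (a 224x224 input in 16x16 patches gives 14 * 14 = 196
        # patches, +1 CLS token) account for the (1, 199, 768) hidden state.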
| 345 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
        processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
        processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
        inputs = processor(text=self.input_string ,voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname ,"""file.npz""" )
        np.savez(tmpfilename ,**voice_preset )
        inputs = processor(text=self.input_string ,voice_preset=tmpfilename )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(key ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
        inputs = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        # The boolean flags below are recovered on the assumption that the
        # processor mirrors this tokenizer call (attention mask kept, no
        # special tokens, no token type ids).
        encoded_tok = tokenizer(
            self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[str] = "markuplm"
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="""gelu""" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,bos_token_id=0 ,eos_token_id=2 ,max_xpath_tag_unit_embeddings=256 ,max_xpath_subs_unit_embeddings=1024 ,tag_pad_id=216 ,subs_pad_id=1001 ,xpath_unit_hidden_size=32 ,max_depth=50 ,position_embedding_type="""absolute""" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs ,)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
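    # Illustrative reading of the xpath defaults above: every DOM node is
    # embedded from at most max_depth=50 (tag, subscript) pairs, drawn from
    # vocabularies of 256 tag units and 1024 subscript units, with tag_pad_id=216
    # and subs_pad_id=1001 as padding ids and 32-dimensional per-unit embeddings.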
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids ,max_new_tokens=10 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,greedy_text )
def A__ ( self: Dict ) -> Optional[Any]:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        streamer_text = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text ,greedy_text )
def A__ ( self: List[Any] ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=10 ,do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer ,skip_prompt=True )
            model.generate(input_ids ,max_new_tokens=10 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,new_greedy_text )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("""distilgpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) ,device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer ,skip_special_tokens=True )
            model.generate(input_ids ,max_new_tokens=1 ,do_sample=False ,streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text ,return_tensors="""pt""" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer ,timeout=0.0_0_1 )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = """"""
            for new_text in streamer:
                streamer_text += new_text
| 345 | 1 |
def is_isogram(string: str ) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string ):
        raise ValueError("""String must only contain alphabetic characters.""" )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
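# Illustrative examples (the check lowercases first, so case never breaks it):
#   is_isogram("Uncopyrightable")  -> True   (15 distinct letters)
#   is_isogram("letter")           -> False  ("t" and "e" repeat)
#   is_isogram("ab cd")            -> raises ValueError (space is not alphabetic)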
if __name__ == "__main__":
UpperCamelCase_ = input('''Enter a string ''').strip()
UpperCamelCase_ = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
def A__ ( self: str ) -> Optional[Any]:
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet ,vqvae=vae ,scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=2 ,output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
        ldm = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=5 ,output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int]=13 ,lowerCamelCase_: List[Any]=7 ,lowerCamelCase_: int=True ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: int=True ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: str=False ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[Any]=99 ,lowerCamelCase_: Optional[Any]=0 ,lowerCamelCase_: List[str]=32 ,lowerCamelCase_: int=5 ,lowerCamelCase_: List[Any]=4 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: int=512 ,lowerCamelCase_: str=2 ,lowerCamelCase_: Tuple=0.0_2 ,lowerCamelCase_: Optional[Any]=2 ,lowerCamelCase_: Optional[int]=4 ,lowerCamelCase_: List[Any]="last" ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: Tuple=0 ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : str = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : str = use_labels
UpperCAmelCase_ : str = gelu_activation
UpperCAmelCase_ : Tuple = sinusoidal_embeddings
UpperCAmelCase_ : Any = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[str] = n_langs
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Dict = n_special
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : Dict = num_choices
UpperCAmelCase_ : Optional[int] = summary_type
UpperCAmelCase_ : Any = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def A__ ( self: str ) -> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] ,2 ).float()
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def A__ ( self: str ) -> List[str]:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,) -> Optional[int]:
UpperCAmelCase_ : List[str] = XLMModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,lengths=lowerCamelCase_ ,langs=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,langs=lowerCamelCase_ )
UpperCAmelCase_ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Optional[int] ,) -> str:
UpperCAmelCase_ : List[Any] = XLMWithLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: str ,) -> List[str]:
UpperCAmelCase_ : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
        outputs = model(lowerCamelCase_ )
        outputs = model(lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ )
        result = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, Any] ,) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
        result = model(lowerCamelCase_ )
        result_with_labels = model(
            lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,cls_index=lowerCamelCase_ ,is_impossible=lowerCamelCase_ ,p_mask=lowerCamelCase_ ,)
        result_with_labels = model(
            lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,cls_index=lowerCamelCase_ ,is_impossible=lowerCamelCase_ ,)
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,) -> str:
UpperCAmelCase_ : int = XLMForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : int = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Tuple = self.num_labels
UpperCAmelCase_ : Union[str, Any] = XLMForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Optional[int] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : List[Any] = XLMForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Any = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Tuple = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: int ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[int]=False ) -> Tuple:
UpperCAmelCase_ : int = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : Dict = XLMModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,emb_dim=37 )
def A__ ( self: str ) -> Optional[Any]:
self.config_tester.run_common_tests()
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ )
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: List[Any]=1 ) -> Dict:
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ ,lowerCamelCase_ ) for iter_attentions in attentions] ,[True] * len(lowerCamelCase_ ) )
self.assertEqual(len(lowerCamelCase_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
UpperCAmelCase_ : List[str] = min_length + idx + 1
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(lowerCamelCase_ ) )
def A__ ( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Optional[int]=1 ) -> Any:
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ ,lowerCamelCase_ ) for iter_hidden_states in hidden_states] ,[True] * len(lowerCamelCase_ ) ,)
self.assertEqual(len(lowerCamelCase_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
UpperCAmelCase_ : List[str] = min_length + idx + 1
UpperCAmelCase_ : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(lowerCamelCase_ ) ,)
pass
@slow
def A__ ( self: Optional[Any] ) -> int:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: Tuple ) -> Optional[int]:
        model = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] ,dtype=torch.long ,device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,expected_output_ids )
| 345 |
def topological_sort(graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
UpperCamelCase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
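# For the adjacency list above, vertex 0 is the only one with indegree 0, and
# removing it frees 1 and 2, then 3, then 4 and 5 -- so the call prints
# [0, 1, 2, 3, 4, 5]. If the graph contained a cycle, cnt would stay below
# len(graph) and "Cycle exists" would be printed instead.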
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = "donut-swin"
A__ : Union[str, Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="""gelu""" ,use_absolute_embeddings=False ,initializer_range=0.0_2 ,layer_norm_eps=1e-5 ,**kwargs ,):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
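    # Worked example of the formula above: with the default embed_dim=96 and
    # depths=[2, 2, 6, 2] (four stages), hidden_size = 96 * 2 ** (4 - 1) = 768,
    # the channel width coming out of the last Swin stage.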
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "swinv2"
A__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="""gelu""" ,use_absolute_embeddings=False ,initializer_range=0.0_2 ,layer_norm_eps=1e-5 ,encoder_stride=32 ,**kwargs ,):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
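    # Minimal usage sketch (assumes the `transformers` package; no weights are
    # needed to build a config):
    #   from transformers import Swinv2Config
    #   config = Swinv2Config(image_size=256, window_size=8)
    #   config.hidden_size  # 768, i.e. 96 * 2 ** 3 from the formula above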
| 345 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs ,cs ) )
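# What the mapping produces: printable bytes map to themselves ("A" -> "A"),
# while the remaining bytes are shifted into unused code points above 255 --
# e.g. the space byte 0x20 becomes chr(256 + 32) = "Ġ" and newline 0x0A becomes
# chr(256 + 10) = "Ċ". Every byte stays reversible and the result contains no
# whitespace or control characters, which is what byte-level BPE relies on.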
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
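# Example: get_pairs(("l", "o", "w", "er")) == {("l", "o"), ("o", "w"), ("w", "er")}
# -- these are the candidate merges the BPE loop below ranks against self.bpe_ranks.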
class _snake_case ( __snake_case ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self ,token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair: self.bpe_ranks.get(pair ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
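    # Worked example with a hypothetical merge table (not the real LED merges):
    # with bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") merges the
    # best-ranked pair first, ("l","o","w") -> ("lo","w") -> ("low",), and
    # returns "low"; a token whose pairs are all unranked comes back unchanged
    # apart from the space join, e.g. "xz" -> "x z".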
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
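    # Layout produced above (RoBERTa-style, which LED inherits):
    #   single sequence: <s> A </s>
    #   pair:            <s> A </s> </s> B </s>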
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
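# A minimal, self-contained sketch of the `global_attention_mask` padding rule
# implemented in `_pad` above: pad with -1 rather than 0, because 0 already
# means "local attention" in Longformer-style masks. The helper name below is
# illustrative, not part of the class above.
def _demo_pad_global_attention_mask(mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert _demo_pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _demo_pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]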
| 345 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
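# The two offline tests above share one pattern: build a mock response whose
# `status_code` is 500 and patch `requests.Session.request` so every HTTP call
# inside the `with` block receives it; the tokenizer must then fall back to the
# local cache. A stripped-down sketch of the pattern (the `load_fn` callable is
# a stand-in for a `from_pretrained` call, not transformers code):
def _demo_load_with_server_down(load_fn):
    response = mock.Mock()
    response.status_code = 500
    response.headers = {}
    response.raise_for_status.side_effect = HTTPError
    with mock.patch("requests.Session.request", return_value=response) as mock_head:
        result = load_fn()  # must succeed from the local cache despite the 500s
    mock_head.assert_called()
    return result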
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
        # Even if the offsets are wrong, the split still outputs the correct string parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
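# For reference, a minimal character trie with the same `add`/`data` shape the
# tests above assert against: tokens are stored as nested dicts, and the
# empty-string key marks the end of a complete token. This is a sketch of the
# idea only, not the `transformers.tokenization_utils.Trie` implementation
# (which also performs the longest-match splitting exercised above).
class _MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word: str):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # leaf marker: a full token ends here

_demo_trie = _MiniTrie()
_demo_trie.add("AB")
assert _demo_trie.data == {"A": {"B": {"": 1}}}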
| 345 | 1 |
import math
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
UpperCAmelCase_ : str = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_a )
def lowerCamelCase_ ( _a : float = 1 / 1_2345 ):
'''simple docstring'''
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Union[str, Any] = 3
while True:
UpperCAmelCase_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_a ):
UpperCAmelCase_ : Optional[int] = int(_a )
total_partitions += 1
if check_partition_perfect(_a ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_a )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 345 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
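# The dummy classes above all implement one pattern: a placeholder that raises
# a clear ImportError as soon as it is constructed or loaded without the
# backend installed. A self-contained sketch of the mechanism (simplified; the
# real `requires_backends` checks availability lazily and formats a
# per-backend installation hint):
class _DemoRequiresFlax:
    def __init__(self, *args, **kwargs):
        raise ImportError("This class requires the `flax` backend. Install it with `pip install flax`.")

try:
    _DemoRequiresFlax()
except ImportError as _demo_err:
    assert "flax" in str(_demo_err)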
| 345 | 1 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
UpperCamelCase_ = get_logger(__name__)
UpperCamelCase_ = Path(__file__).parent / '''model_card_template.md'''
UpperCamelCase_ = uuida().hex
UpperCamelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
UpperCamelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
UpperCamelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def lowerCamelCase_ ( _a : Union[Dict, str, None] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_a , _a ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(_a , _a ):
ua += "; " + user_agent
return ua
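# A small, self-contained sketch of the string the builder above assembles
# (the version numbers and session id here are placeholders; real values come
# from the installed environment):
_demo_ua = "diffusers/0.14.0; python/3.10.12; session_id/abc123"
_demo_ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in {"pipeline": "text-to-image"}.items())
assert _demo_ua.endswith("pipeline/text-to-image")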
def lowerCamelCase_ ( _a : str , _a : Optional[str] = None , _a : Optional[str] = None ):
'''simple docstring'''
if token is None:
UpperCAmelCase_ : int = HfFolder.get_token()
if organization is None:
UpperCAmelCase_ : Union[str, Any] = whoami(_a )["""name"""]
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def lowerCamelCase_ ( _a : Tuple , _a : Union[str, Any] ):
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(_a , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
UpperCAmelCase_ : List[str] = args.hub_token if hasattr(_a , """hub_token""" ) else None
UpperCAmelCase_ : Optional[int] = get_full_repo_name(_a , token=_a )
    UpperCAmelCase_ : Tuple = ModelCard.from_template(
        # Card metadata object that will be converted to a YAML block
        card_data=ModelCardData(
            language="""en""" ,
            license="""apache-2.0""" ,
            library_name="""diffusers""" ,
            tags=[] ,
            datasets=args.dataset_name ,
            metrics=[] ,
        ) ,
        template_path=_a ,
        model_name=_a ,
        repo_name=_a ,
        dataset_name=args.dataset_name if hasattr(_a , """dataset_name""" ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(_a , """gradient_accumulation_steps""" ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(_a , """adam_beta1""" ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(_a , """adam_beta2""" ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(_a , """adam_weight_decay""" ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(_a , """adam_epsilon""" ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(_a , """lr_scheduler""" ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(_a , """lr_warmup_steps""" ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(_a , """ema_inv_gamma""" ) else None ,
        ema_power=args.ema_power if hasattr(_a , """ema_power""" ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(_a , """ema_max_decay""" ) else None ,
        mixed_precision=args.mixed_precision ,
    )
UpperCAmelCase_ : Any = os.path.join(args.output_dir , """README.md""" )
model_card.save(_a )
def lowerCamelCase_ ( _a : Optional[str] , _a : Optional[str] = None ):
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCAmelCase_ : Tuple = str(Path(_a ).as_posix() )
UpperCAmelCase_ : Union[str, Any] = re.search(r"""snapshots/([^/]+)/""" , _a )
if search is None:
return None
UpperCAmelCase_ : Any = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_a ) else None
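# Quick check of the extraction logic above: the commit hash is the
# `snapshots/<hash>/` segment of a resolved cache path (the path and hash
# below are made up for illustration):
_demo_path = "/root/.cache/huggingface/hub/models--gpt2/snapshots/0f1d6fd/config.json"
_demo_search = re.search(r"snapshots/([^/]+)/", _demo_path)
assert _demo_search is not None and _demo_search.groups()[0] == "0f1d6fd"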
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
UpperCamelCase_ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
UpperCamelCase_ = os.path.join(hf_cache_home, '''diffusers''')
def lowerCamelCase_ ( _a : Optional[str] = None , _a : Optional[str] = None ):
'''simple docstring'''
if new_cache_dir is None:
UpperCAmelCase_ : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCAmelCase_ : Any = old_diffusers_cache
UpperCAmelCase_ : Dict = Path(_a ).expanduser()
UpperCAmelCase_ : Tuple = Path(_a ).expanduser()
for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCAmelCase_ : int = new_cache_dir / old_blob_path.relative_to(_a )
new_blob_path.parent.mkdir(parents=_a , exist_ok=_a )
os.replace(_a , _a )
try:
os.symlink(_a , _a )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
UpperCamelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
UpperCamelCase_ = 0
else:
with open(cache_version_file) as f:
try:
UpperCamelCase_ = int(f.read())
except ValueError:
UpperCamelCase_ = 0
if cache_version < 1:
UpperCamelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
UpperCamelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'''the directory exists and can be written to.'''
)
def lowerCamelCase_ ( _a : str , _a : Optional[str] = None ):
'''simple docstring'''
if variant is not None:
UpperCAmelCase_ : str = weights_name.split(""".""" )
UpperCAmelCase_ : Dict = splits[:-1] + [variant] + splits[-1:]
UpperCAmelCase_ : Optional[Any] = """.""".join(_a )
return weights_name
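# The variant helper above splices the variant name in front of the file
# extension. A quick check with the same split/join logic (file and variant
# names below are just examples):
_demo_splits = "diffusion_pytorch_model.bin".split(".")
assert ".".join(_demo_splits[:-1] + ["fp16"] + _demo_splits[-1:]) == "diffusion_pytorch_model.fp16.bin"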
def lowerCamelCase_ ( _a : List[Any] , *,
_a : Optional[Any] , _a : Tuple , _a : List[Any] , _a : int , _a : Optional[Any] , _a : Union[str, Any] , _a : List[Any] , _a : List[str] , _a : str , _a : Union[str, Any] , _a : Any=None , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = str(_a )
if os.path.isfile(_a ):
return pretrained_model_name_or_path
elif os.path.isdir(_a ):
if os.path.isfile(os.path.join(_a , _a ) ):
# Load from a PyTorch checkpoint
UpperCAmelCase_ : str = os.path.join(_a , _a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_a , _a , _a ) ):
UpperCAmelCase_ : int = os.path.join(_a , _a , _a )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_a ).base_version ) >= version.parse("""0.20.0""" )
):
try:
UpperCAmelCase_ : Optional[int] = hf_hub_download(
_a , filename=_add_variant(_a , _a ) , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , use_auth_token=_a , user_agent=_a , subfolder=_a , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , _a , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_a , _a )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_a , _a )}\' so that the correct variant file can be added.''' , _a , )
try:
# 2. Load model file as usual
UpperCAmelCase_ : List[str] = hf_hub_download(
_a , filename=_a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , use_auth_token=_a , user_agent=_a , subfolder=_a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"""this model name. Check the model page at """
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
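# Resolution order implemented by the loader above, summarized:
#   1. `pretrained_model_name_or_path` is a file          -> return it as-is
#   2. it is a directory (optionally with a `subfolder`)  -> return the local weights file
#   3. otherwise download from the Hub: first try the deprecated
#      `revision`-as-variant spelling (with a deprecation warning), then the
#      regular `hf_hub_download` call, mapping each Hub error class to a
#      descriptive EnvironmentError.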
| 345 |
import random
from typing import Any
def lowerCamelCase_ ( _a : list ):
'''simple docstring'''
for _ in range(len(_a ) ):
UpperCAmelCase_ : Tuple = random.randint(0 , len(_a ) - 1 )
UpperCAmelCase_ : List[Any] = random.randint(0 , len(_a ) - 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = data[b], data[a]
return data
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
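# Note: the shuffle above draws two independent random indices and swaps them
# on each of len(data) passes. That is a valid randomizer, but it is not the
# classic Fisher-Yates algorithm and it does not produce every permutation
# with equal probability. A sketch of the canonical unbiased version, using
# the same `random` import: walk the list from the back and swap each slot
# with a uniformly chosen slot at or before it.
def fisher_yates_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive: 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data
# e.g. fisher_yates_unbiased([0, 1, 2, 3]) -> one of the 24 permutations, uniformly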
| 345 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "layoutlmv3"
def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]:
super().__init__(
vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : Optional[int] = shape_size
UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
UpperCAmelCase_ : Optional[int] = rel_pos_bins
UpperCAmelCase_ : Union[str, Any] = max_rel_pos
UpperCAmelCase_ : Dict = has_spatial_attention_bias
UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
UpperCAmelCase_ : Tuple = max_rel_ad_pos
UpperCAmelCase_ : Union[str, Any] = text_embed
UpperCAmelCase_ : Optional[Any] = visual_embed
UpperCAmelCase_ : List[str] = input_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = classifier_dropout
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = version.parse("1.12" )
@property
def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
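# A minimal illustration of the dummy-input shapes assembled above, with no
# processor required: for batch_size=2 and seq_length=8 the text placeholder is
# the unk token repeated per sequence, and every sample gets one fixed bounding
# box in (x0, y0, x1, y1) form.
_demo_batch, _demo_seq, _demo_unk = 2, 8, "[UNK]"
_demo_text = [[" ".join([_demo_unk]) * _demo_seq]] * _demo_batch
_demo_boxes = [[[48, 84, 73, 128]]] * _demo_batch
assert len(_demo_text) == _demo_batch and len(_demo_boxes) == _demo_batch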
| 345 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
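# All of the up blocks above share one skip-connection pattern: pop the
# matching encoder activation off the residual tuple and concatenate it with
# the current hidden states along the channel axis before each resnet. A
# shape-only sketch in plain jax.numpy (channels-last layout, as above):
def _demo_pop_and_concat(hidden_states, res_hidden_states_tuple):
    res_hidden_states = res_hidden_states_tuple[-1]
    remaining = res_hidden_states_tuple[:-1]
    return jnp.concatenate((hidden_states, res_hidden_states), axis=-1), remaining

_demo_h, _demo_skips = _demo_pop_and_concat(jnp.zeros((1, 8, 8, 4)), (jnp.zeros((1, 8, 8, 2)),))
assert _demo_h.shape == (1, 8, 8, 6) and _demo_skips == ()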
| 345 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCamelCase_ = 250004
UpperCamelCase_ = 250020
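# The translation tests below rely on `shift_tokens_right`, which builds the
# decoder inputs from the labels by moving the final (non-pad) end-of-sequence
# token to the front. A list-based sketch of the idea (simplified: the real
# implementation operates on batched tensors and preserves padding):
def _demo_shift_right(labels: list, pad_token_id: int) -> list:
    last_non_pad = [t for t in labels if t != pad_token_id][-1]
    return [last_non_pad] + labels[:-1]

# labels [RO_CODE, 884, 9019, 2] become decoder inputs [2, RO_CODE, 884, 9019],
# matching the `decoder_input_ids[1][:2] == [2, RO_CODE]` assertion below.
assert _demo_shift_right([250020, 884, 9019, 2], pad_token_id=1) == [2, 250020, 884, 9019]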
@require_sentencepiece
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = MBartaaTokenizer
A__ : List[Any] = MBartaaTokenizerFast
A__ : Optional[Any] = True
A__ : Tuple = True
def A__ ( self: int ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Union[str, Any] = MBartaaTokenizer(lowerCamelCase_ ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : str = """<s>"""
UpperCAmelCase_ : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(lowerCamelCase_ ) ,1054 )
def A__ ( self: int ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size ,1054 )
def A__ ( self: int ) -> str:
UpperCAmelCase_ : str = MBartaaTokenizer(lowerCamelCase_ ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
UpperCAmelCase_ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] ,)
UpperCAmelCase_ : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] ,)
@slow
def A__ ( self: Optional[Any] ) -> Dict:
# fmt: off
UpperCAmelCase_ : int = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ ,model_name="""facebook/mbart-large-50""" ,revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" ,)
def A__ ( self: Optional[int] ) -> int:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase_ : int = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : str = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase_ : Union[str, Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCamelCase_ ,lowerCamelCase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase_ : Any = tokenizer_r.from_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ ,lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase_ : Any = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_ ,legacy_format=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ ,lowerCamelCase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase_ : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ ,lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase_ : str = tempfile.mkdtemp()
UpperCAmelCase_ : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ ,legacy_format=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase_ : Dict = tokenizer_r.from_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ ,lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = "facebook/mbart-large-50-one-to-many-mmt"
A__ : Union[str, Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
A__ : List[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
A__ : int = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def A__ ( cls: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" )
UpperCAmelCase_ : List[str] = 1
return cls
def A__ ( self: List[Any] ) -> Any:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] ,250038 )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Optional[int]:
self.assertIn(lowerCamelCase_ ,self.tokenizer.all_special_ids )
UpperCAmelCase_ : int = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase_ : Optional[int] = self.tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] ,lowerCamelCase_ )
UpperCAmelCase_ : List[str] = 10
UpperCAmelCase_ : List[str] = self.tokenizer(lowerCamelCase_ ,max_length=lowerCamelCase_ ,truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] ,lowerCamelCase_ )
self.assertEqual(ids[-1] ,2 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[250053, 250001] )
def A__ ( self: int ) -> str:
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase_ )
@require_torch
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : List[str] = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def A__ ( self: Tuple ) -> Any:
UpperCAmelCase_ : List[str] = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
UpperCAmelCase_ : Any = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
UpperCAmelCase_ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ )
self.assertEqual(2 ,batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def A__ ( self: List[Any] ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(self.src_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=3 ,return_tensors="""pt""" )
UpperCAmelCase_ : str = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=10 ,return_tensors="""pt""" )
UpperCAmelCase_ : Optional[Any] = targets["""input_ids"""]
UpperCAmelCase_ : str = shift_tokens_right(lowerCamelCase_ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def A__ ( self: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) ,{
# en_XX, A, test, EOS
"""input_ids""": [[250004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} ,)
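# A hedged usage sketch (not part of the tests above): how the translation
# inputs asserted in the last test are typically produced at inference time.
# Checkpoint name and language codes mirror the constants above; running this
# needs network access to download the tokenizer files.
from transformers import MBart50TokenizerFast

_tok = MBart50TokenizerFast.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX"
)
_inputs = _tok("A test", return_tensors="pt")  # starts with the en_XX code, ends with EOS
_forced_bos = _tok.lang_code_to_id["ar_AR"]    # 250001, as asserted in the test above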
| 345 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
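    # Note: the backprop steps below rely on the sigmoid derivative
    # sig'(x) = sig(x) * (1 - sig(x)), which is why the gradient code uses
    # terms of the form np.multiply(out, (1 - out)).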
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
        # calculate the feature map of every single kernel, saved as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
        # expanding the data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
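    # Pooling sketch: with a 4x4 feature map and size_pooling=2, each
    # non-overlapping 2x2 window collapses to one value (its mean for
    # "average_pool", its max for "max_pooling"), yielding a 2x2 pooled map;
    # the nested i_focus/j_focus loops above walk those windows in row order.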
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # expanding three-dimensional data to a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
        # expanding a matrix to a one-dimensional list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
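    # Equivalent vectorized view of the upsampling above (hypothetical 2x2
    # pooled gradient with pooling window 2): np.kron(pd_pool_2x2,
    # np.ones((2, 2))) broadcasts each pooled gradient value back over its
    # pooling window, which is what the nested i/j loops do one window at a
    # time before scaling by the sigmoid derivative of the feature map.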
    def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=True ) -> Optional[int]:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error of every single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
        # model prediction
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution process so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 345 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase_ ( _a : float , _a : float , _a : bool = False ):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(_a ), magnitude * sin(_a )]
return [magnitude * cos(radians(_a ) ), magnitude * sin(radians(_a ) )]
def lowerCamelCase_ ( _a : NDArray[floataa] , _a : NDArray[floataa] , _a : float = 10**-1 ):
'''simple docstring'''
UpperCAmelCase_ : NDArray[floataa] = cross(_a , _a )
UpperCAmelCase_ : float = sum(_a )
return abs(_a ) < eps
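# A quick hedged sanity check of the helper above (function names follow the
# __main__ block below): two equal and opposite forces applied at the same
# point produce cancelling moments about the origin, so
# in_static_equilibrium(array([[0, 1], [0, -1]]), array([[1, 0], [1, 0]]))
# evaluates to True.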
if __name__ == "__main__":
# Test to check if it works
UpperCamelCase_ = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
UpperCamelCase_ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCamelCase_ = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
UpperCamelCase_ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCamelCase_ = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
UpperCamelCase_ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 345 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
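# Hedged BPE illustration mirroring the fixtures above: "adapt" is a whole
# vocabulary entry, so it survives tokenization intact, while "react" has no
# merge rule beyond single characters and comes out as "re@@ a@@ c@@ t",
# which is exactly the split asserted in the tokenization test.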
| 345 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json'''}
UpperCamelCase_ = {
'''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
UpperCamelCase_ = {'''mgp-str''': 27}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Any = VOCAB_FILES_NAMES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: int ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any]="[GO]" ,lowerCamelCase_: List[str]="[GO]" ,lowerCamelCase_: Optional[Any]="[s]" ,lowerCamelCase_: Any="[GO]" ,**lowerCamelCase_: Dict ) -> Optional[int]:
super().__init__(
unk_token=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Optional[int] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
@property
def A__ ( self: Dict ) -> Optional[Any]:
return len(self.vocab )
def A__ ( self: Tuple ) -> int:
return dict(self.vocab ,**self.added_tokens_encoder )
def A__ ( self: Dict ,lowerCamelCase_: List[str] ) -> List[str]:
UpperCAmelCase_ : Dict = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def A__ ( self: Optional[int] ,lowerCamelCase_: int ) -> Optional[int]:
return self.vocab.get(lowerCamelCase_ ,self.vocab.get(self.unk_token ) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ) -> Dict:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCamelCase_ ) )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
return (vocab_file,)
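# Minimal self-contained sketch of the character-level scheme implemented
# above (hypothetical 4-symbol vocab, independent of the real mgp-str files):
_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3}
_decoder = {v: k for k, v in _vocab.items()}
_ids = [_vocab.get(ch, _vocab["[GO]"]) for ch in "ab"]  # unknown chars fall back to [GO]
assert _ids == [2, 3]
assert "".join(_decoder[i] for i in _ids) == "ab"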
| 345 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
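# Hedged usage sketch of the attribute_map aliasing declared above (assumes a
# transformers release that ships ErnieMConfig; the class in this file is the
# same config under the dump's renamed class name):
from transformers import ErnieMConfig

_cfg = ErnieMConfig(num_labels=3, classifier_dropout=0.2)
assert _cfg.num_classes == 3  # "num_classes" resolves to "num_labels"
assert _cfg.dropout == 0.2    # "dropout" resolves to "classifier_dropout"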
| 345 | 1 |
import math
def lowerCamelCase_ ( _a : list , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = len(_a )
UpperCAmelCase_ : Tuple = int(math.floor(math.sqrt(_a ) ) )
UpperCAmelCase_ : Union[str, Any] = 0
while arr[min(_a , _a ) - 1] < x:
UpperCAmelCase_ : Tuple = step
step += int(math.floor(math.sqrt(_a ) ) )
if prev >= n:
return -1
while arr[prev] < x:
UpperCAmelCase_ : str = prev + 1
if prev == min(_a , _a ):
return -1
if arr[prev] == x:
return prev
return -1
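# The block size step = floor(sqrt(n)) used above is the classic optimum: at
# most about sqrt(n) jumps plus sqrt(n) linear steps, i.e. O(sqrt(n))
# comparisons on a sorted array. Hedged examples (function name as used in
# the __main__ block below):
# jump_search([0, 1, 3, 5, 7, 11, 13], 7)  # -> 4
# jump_search([0, 1, 3, 5, 7, 11, 13], 6)  # -> -1 (absent)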
if __name__ == "__main__":
UpperCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase_ = [int(item) for item in user_input.split(''',''')]
UpperCamelCase_ = int(input('''Enter the number to be searched:\n'''))
UpperCamelCase_ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F"Number {x} is at index {res}")
| 345 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : str=" " ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = text.split(_a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )]
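# Hedged example of the splitter above: with n=3 words and the default
# separator, split_text("a b c d e", n=3) returns ["a b c", "d e"].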
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
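    # Hedged retrieval sketch continuing the commented hints above
    # (query_embedding would come from a DPR question encoder, which this
    # script does not build):
    # scores, retrieved_examples = dataset.get_nearest_examples(
    #     "embeddings", query_embedding, k=5
    # )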
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowerCamelCase_ ( _a : Union[str, Any] , _a : Optional[int] , _a : Optional[Any]=None , _a : Any=None , _a : str=None , _a : List[str]=None , _a : Any=None , _a : Optional[int]=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ : Tuple = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ : str = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
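# Hedged illustration of shift_tokens_right as exercised in the tests below:
# the sequence is rotated right by one and position 0 becomes the decoder
# start token, e.g. with pad_token_id=1 and decoder_start_token_id=2,
# shift_tokens_right(np.array([[71, 82, 2]]), 1, 2) -> array([[ 2, 71, 82]]).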
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[Any]=13 ,lowerCamelCase_: Dict=7 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: Optional[Any]=99 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Optional[Any]=2 ,lowerCamelCase_: List[Any]=4 ,lowerCamelCase_: Dict=4 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Optional[Any]=0.1 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: List[Any]=32 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: List[Any]=1 ,lowerCamelCase_: Union[str, Any]=0 ,lowerCamelCase_: Dict=0.0_2 ,) -> List[str]:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : str = use_labels
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = eos_token_id
UpperCAmelCase_ : Optional[int] = pad_token_id
UpperCAmelCase_ : Dict = bos_token_id
UpperCAmelCase_ : int = initializer_range
def A__ ( self: str ) -> str:
UpperCAmelCase_ : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) ,3 ,self.vocab_size )
UpperCAmelCase_ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) ,dtype=np.intaa )) ,-1 )
UpperCAmelCase_ : List[str] = shift_tokens_right(lowerCamelCase_ ,1 ,2 )
UpperCAmelCase_ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,initializer_range=self.initializer_range ,use_cache=lowerCamelCase_ ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return config, inputs_dict
def A__ ( self: Tuple ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ) -> List[Any]:
UpperCAmelCase_ : int = 20
UpperCAmelCase_ : Optional[int] = model_class_name(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase_ : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase_ : Any = model.decode(
decoder_input_ids[:, :-1] ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase_ : List[Any] = model.decode(
decoder_input_ids[:, -1:] ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = model.decode(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F'''Max diff is {diff}''' )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : List[Any] = 20
UpperCAmelCase_ : str = model_class_name(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase_ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
UpperCAmelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase_ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] ,lowerCamelCase_ ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=lowerCamelCase_ ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F'''Max diff is {diff}''' )
@require_flax
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : int = 99
def A__ ( self: int ) -> Any:
UpperCAmelCase_ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] ,dtype=np.intaa ,)
UpperCAmelCase_ : str = input_ids.shape[0]
UpperCAmelCase_ : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=24 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=32 ,decoder_ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
def A__ ( self: int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self._get_config_and_data()
UpperCAmelCase_ : List[str] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = lm_model(input_ids=lowerCamelCase_ )
UpperCAmelCase_ : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape ,lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=14 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=8 ,decoder_ffn_dim=8 ,max_position_embeddings=48 ,)
UpperCAmelCase_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] ,dtype=np.intaa )
UpperCAmelCase_ : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] ,dtype=np.intaa )
UpperCAmelCase_ : Any = lm_model(input_ids=lowerCamelCase_ ,decoder_input_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] ,dtype=np.intaa )
UpperCAmelCase_ : int = shift_tokens_right(lowerCamelCase_ ,1 ,2 )
UpperCAmelCase_ : str = np.equal(lowerCamelCase_ ,1 ).astype(np.floataa ).sum()
UpperCAmelCase_ : List[str] = np.equal(lowerCamelCase_ ,1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape ,input_ids.shape )
self.assertEqual(lowerCamelCase_ ,n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] ,2 ).all() )
@require_flax
class _snake_case ( __snake_case , unittest.TestCase , __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = True
A__ : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A__ : Dict = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def A__ ( self: int ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = FlaxBlenderbotSmallModelTester(self )
def A__ ( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : int = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Any = model_class(lowerCamelCase_ )
@jax.jit
def encode_jitted(lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple=None ,**lowerCamelCase_: Union[str, Any] ):
return model.encode(input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase_ : Tuple = encode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase_ : List[str] = encode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) ,len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ ,lowerCamelCase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
def A__ ( self: Any ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[str] = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
UpperCAmelCase_ : int = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ,lowerCamelCase_: Dict ):
return model.decode(
decoder_input_ids=lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,encoder_outputs=lowerCamelCase_ ,)
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase_ : Any = decode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[Any] = decode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) ,len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ ,lowerCamelCase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def A__ ( self: Dict ) -> Tuple:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Any = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 345 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
        # run the backwards pass on the model. For simplicity, we don't compute a
        # real loss here; we backprop on the mean difference from random targets
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
        # run the backwards pass on the model. For simplicity, we don't compute a
        # real loss here; we backprop on the mean difference from random targets
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
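    # Note on the pattern above: enable_gradient_checkpointing() trades extra
    # recomputation during backward() for lower activation memory, so the
    # outputs, loss and parameter gradients are expected to match the baseline
    # run up to small numerical tolerances, which is what the assertions check.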
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
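# A minimal, self-contained sketch of the slice-comparison pattern the tests
# above rely on (the helper name, indices and default tolerance are
# illustrative, not part of the original suite): reduce the output tensor to a
# small deterministic slice and compare it against hard-coded reference values.
import torch

def _check_output_slice(sample, expected, atol=3e-3):
    actual = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    return torch.allclose(actual, torch.tensor(expected), atol=atol)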
| 345 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
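            # Flax uses NHWC layout, so axis=-1 concatenates the skip connection
            # along channels (the PyTorch NCHW equivalent concatenates on dim=1)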
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
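# Note (hedged): all five block classes in this file share the obfuscated name
# `_snake_case`, so each definition shadows the previous one as written; judging
# by their fields they correspond upstream to FlaxCrossAttnDownBlock2D,
# FlaxDownBlock2D, FlaxCrossAttnUpBlock2D, FlaxUpBlock2D and
# FlaxUNetMidBlock2DCrossAttn, which is an inference rather than a stated fact.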
| 345 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"audio": Audio()} )
A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
A__ : str = "audio"
A__ : str = "transcription"
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
UpperCAmelCase_ : Any = features[self.audio_column]
UpperCAmelCase_ : Union[str, Any] = input_schema
return task_template
@property
def A__ ( self: List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
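# Usage sketch (hedged: upstream this template is datasets' AutomaticSpeechRecognition
# and the validating method above is align_with_features; every name below is an
# assumption, since identifiers in this dump are style-obfuscated):
# template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")
# aligned = template.align_with_features(dataset.features)  # deep-copies and re-types the audio column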
| 345 | 1 |
from collections import namedtuple
UpperCamelCase_ = namedtuple('''from_to''', '''from_ to''')
UpperCamelCase_ = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def lowerCamelCase_ ( _a : float , _a : str , _a : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ """, """.join(_a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ """, """.join(_a ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
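# Self-contained usage sketch mirroring the converter above (re-implemented
# locally only because the original function name is style-obfuscated):
def _convert_volume(value: float, from_type: str, to_type: str) -> float:
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to

# e.g. _convert_volume(4, "cubicmeter", "litre") == 4000, since 1 m^3 = 1000 L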
| 345 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "layoutlmv3"
def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]:
super().__init__(
vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : Optional[int] = shape_size
UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
UpperCAmelCase_ : Optional[int] = rel_pos_bins
UpperCAmelCase_ : Union[str, Any] = max_rel_pos
UpperCAmelCase_ : Dict = has_spatial_attention_bias
UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
UpperCAmelCase_ : Tuple = max_rel_ad_pos
UpperCAmelCase_ : Union[str, Any] = text_embed
UpperCAmelCase_ : Optional[Any] = visual_embed
UpperCAmelCase_ : List[str] = input_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = classifier_dropout
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = version.parse("1.12" )
@property
def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
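# Usage sketch (hedged: upstream these classes are LayoutLMv3Config,
# LayoutLMv3OnnxConfig and LayoutLMv3Processor, and the dummy-input method
# above is generate_dummy_inputs; all names below are assumptions):
# onnx_config = LayoutLMv3OnnxConfig(model.config, task="question-answering")
# dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)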
| 345 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_uncond_unet
UpperCAmelCase_ : List[Any] = DDIMScheduler()
UpperCAmelCase_ : List[Any] = self.dummy_vq_model
UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
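        # MPS kernels are slightly less deterministic than CPU/CUDA, hence the
        # looser tolerance on that backend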
UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_a , _a )
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape
UpperCAmelCase_ : Tuple = nn.Linear(_a , _a , bias=_a )
UpperCAmelCase_ : List[Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] )
UpperCAmelCase_ : Optional[int] = checkpoint["""model"""]
remove_ignore_keys_(_a )
UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]
UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
UpperCAmelCase_ : int = XGLMConfig(
vocab_size=_a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
UpperCAmelCase_ : List[str] = XGLMForCausalLM(_a )
UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a )
print(_a )
UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
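# Example invocation (paths are placeholders; use whatever filename this script
# is saved under):
#   python convert_xglm_checkpoint.py /path/to/model.pt /path/to/output_dir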
| 345 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A__ : str = "CIDAS/clipseg-rd64-refined"
A__ : Any = "image_segmenter"
A__ : str = CLIPSegForImageSegmentation
A__ : Optional[int] = ["image", "text"]
A__ : Optional[Any] = ["image"]
def __init__( self: List[str] ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> Optional[int]:
requires_backends(self ,["""vision"""] )
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: "Image" ,lowerCamelCase_: str ) -> Union[str, Any]:
return self.pre_processor(text=[label] ,images=[image] ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
def A__ ( self: str ,lowerCamelCase_: List[str] ) -> Union[str, Any]:
with torch.no_grad():
UpperCAmelCase_ : List[Any] = self.model(**lowerCamelCase_ ).logits
return logits
def A__ ( self: int ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : Tuple = outputs.cpu().detach().numpy()
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Union[str, Any] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
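# Usage sketch (hedged: `load_tool` is the upstream agents entry point and the
# tool identifier mirrors the attribute defined above; both are assumptions):
# from transformers import load_tool
# segmenter = load_tool("image_segmenter")
# mask = segmenter(image=Image.open("photo.png"), label="a cat")  # returns a PIL mask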
| 345 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
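        # the backbone class is last in all_model_classes and exposes no
        # input/output embeddings, hence the [:-1] slice here and in the
        # signature/hidden-state tests below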
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
        '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Tuple = "lilt"
def __init__( self: Any ,lowerCamelCase_: List[Any]=30522 ,lowerCamelCase_: Optional[int]=768 ,lowerCamelCase_: Optional[Any]=12 ,lowerCamelCase_: str=12 ,lowerCamelCase_: Any=3072 ,lowerCamelCase_: Dict="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Union[str, Any]=512 ,lowerCamelCase_: str=2 ,lowerCamelCase_: Any=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-12 ,lowerCamelCase_: List[str]=0 ,lowerCamelCase_: Union[str, Any]="absolute" ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: Tuple=4 ,lowerCamelCase_: Optional[Any]=1024 ,**lowerCamelCase_: Optional[Any] ,) -> Optional[int]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : Tuple = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = position_embedding_type
UpperCAmelCase_ : Optional[int] = classifier_dropout
UpperCAmelCase_ : Optional[int] = channel_shrink_ratio
UpperCAmelCase_ : Tuple = max_ad_position_embeddings
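# Usage sketch (hedged: upstream this configuration class is LiltConfig; the
# class name and keyword arguments below are assumptions, since identifiers in
# this dump are style-obfuscated):
# config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
# config_dict = config.to_dict()  # serialisation inherited from PretrainedConfig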
| 345 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = SwinvaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
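            # Swinv2 attention is windowed: each map has shape
            # [num_heads, window_size**2, window_size**2], asserted below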
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
                # +1 output for hidden_states plus another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
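        # e.g. a 224x224 input with 4x4 patches would give (224 // 4) * (224 // 4) = 3136 patches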
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
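        # reshaped_hidden_states are (batch, hidden_size, height, width); flatten the
        # spatial dims and permute back to (batch, seq_len, hidden_size) for comparison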
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
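        # enlarge the input so height/width are rounded up to the next multiple of the
        # patch size, exercising Swinv2's internal padding path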
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
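        # with every initializer range zeroed out, trainable weights should land on exactly 0.0 or 1.0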
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
| 345 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str]=13 ,lowerCamelCase_: int=30 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Union[str, Any]=3 ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: str=32 ,lowerCamelCase_: int=5 ,lowerCamelCase_: Optional[int]=4 ,lowerCamelCase_: Any=37 ,lowerCamelCase_: List[str]="gelu" ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: Dict=0.1 ,lowerCamelCase_: Any=10 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=3 ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: str=2 ,) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : int = patch_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Optional[int] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ : Optional[Any] = (image_size // patch_size) ** 2
UpperCAmelCase_ : Union[str, Any] = num_patches + 2
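        # with the defaults above (image_size=30, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 227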
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A__ ( self: Optional[int] ) -> int:
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Optional[int] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Tuple = DeiTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ,lowerCamelCase_: Dict ) -> Any:
UpperCAmelCase_ : str = DeiTForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : int = DeiTForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Any:
UpperCAmelCase_ : Any = self.type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = DeiTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Optional[Any] = DeiTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Dict ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A__ : Optional[Any] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A__ : Dict = False
A__ : List[Any] = False
A__ : Any = False
def A__ ( self: Dict ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = DeiTModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,has_text_modality=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: str ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def A__ ( self: str ) -> Union[str, Any]:
pass
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: List[str] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def A__ ( self: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple=False ) -> Dict:
UpperCAmelCase_ : Dict = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self: List[str] ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = model(**lowerCamelCase_ ).loss
loss.backward()
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase_ )
model.train()
UpperCAmelCase_ : Dict = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ ).loss
loss.backward()
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
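        # the classification head infers the problem type (regression vs. single- vs.
        # multi-label classification) from num_labels and the label dtype, so each case
        # below must train without loss-shape warnings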
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
UpperCAmelCase_ : List[str] = problem_type["""title"""]
UpperCAmelCase_ : List[Any] = problem_type["""num_labels"""]
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
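                    # multi-label targets need shape (batch_size, num_labels): replicate the single label across columns, then cast to the expected dtype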
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 ,problem_type["""num_labels"""] )
UpperCAmelCase_ : Dict = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase_ ) as warning_list:
UpperCAmelCase_ : Union[str, Any] = model(**lowerCamelCase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A__ ( self: Any ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = DeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: List[Any] ) -> int:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : str = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self: str ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" ,torch_dtype=torch.floataa ,device_map="""auto""" )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : Tuple = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
| 345 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
'''simple docstring'''
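    # Build a reversible byte -> printable-unicode mapping: the 188 printable bytes map
    # to themselves, and the remaining bytes are shifted into the 256+ range so every
    # byte gets a visible, BPE-friendly character.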
UpperCAmelCase_ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_ : Dict = bs[:]
UpperCAmelCase_ : Any = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    UpperCAmelCase_ : Any = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
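    # e.g. for the word ("h", "e", "l", "l", "o") this returns
    # {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}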
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
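        # the pattern splits text into contraction suffixes ('s, 't, 're, ...), runs of
        # letters, runs of digits, runs of other symbols, and whitespace chunks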
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
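        # classic BPE: repeatedly merge the adjacent pair with the lowest merge rank
        # until no pair in the word is left in the learned merge table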
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : lowerCamelCase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
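        # single sequence: `<s> X </s>`; pair of sequences: `<s> A </s></s> B </s>` (BART-style)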
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
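        # pad as usual, then extend `global_attention_mask` to match the padded length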
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 345 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: int ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: List[Any]=30 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Optional[Any]=5 ,lowerCamelCase_: Dict=4 ,lowerCamelCase_: Optional[Any]=37 ,lowerCamelCase_: List[Any]="gelu" ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple=10 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: str=2 ,) -> List[Any]:
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : int = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase_ : Dict = num_patches + 1
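        # with the defaults above (image_size=30, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 226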
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = None
if self.use_labels:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Dict = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: int ) -> List[Any]:
UpperCAmelCase_ : Dict = ViTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ViTForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Dict = ViTForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: str ,lowerCamelCase_: int ) -> Any:
UpperCAmelCase_ : int = self.type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = ViTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[int] = ViTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A__ : Tuple = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = True
A__ : Optional[int] = False
A__ : List[Any] = False
A__ : Tuple = False
def A__ ( self: int ) -> str:
UpperCAmelCase_ : List[str] = ViTModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,has_text_modality=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A__ ( self: Any ) -> Union[str, Any]:
pass
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Any ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: Union[str, Any] ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = ViTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: str ) -> Dict:
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : int = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
@slow
def A__ ( self: Optional[int] ) -> List[Any]:
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which allows interpolating the pre-trained position embeddings so the model can
        # run on higher-resolution inputs. Facebook AI's DINO model leverages this to
        # visualize self-attention on higher-resolution images.
UpperCAmelCase_ : Optional[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" ,size=480 )
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : str = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : int = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ,interpolate_pos_encoding=lowerCamelCase_ )
        # verify the last hidden states
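        # dino-vits8 at 480x480 with 8x8 patches: (480 // 8) ** 2 = 3600 patches + 1 [CLS] token = 3601 positions of width 384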
UpperCAmelCase_ : List[str] = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self: int ) -> Dict:
UpperCAmelCase_ : Dict = ViTModel.from_pretrained("""facebook/dino-vits8""" ,torch_dtype=torch.floataa ,device_map="""auto""" )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : Union[str, Any] = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ )
| 345 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
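        # dummy feature extractor: `extract` returns an object exposing an empty
        # `pixel_values` tensor so the safety-checker plumbing can run without CLIP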
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # assemble the pipeline from the dummy components (this test uses the DDIM scheduler configured above)
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure the PNDM scheduler skips the PRK warmup steps
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
        # make sure the PNDM scheduler skips the PRK warmup steps
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
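        # Safe Latent Diffusion (SLD) parameters: sld_guidance_scale=0 disables the
        # safety-guidance term; the second call below enables a strong configuration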
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance enabled (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
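        # No safety_checker override here, so the default checker stays active: without SLD guidance the
        # output is blacked out (the expected slice is all zeros), while strong guidance produces an image
        # that passes the checker.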
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 345 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase_ = get_tests_dir('''fixtures''')
UpperCamelCase_ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
UpperCamelCase_ = get_tests_dir('''fixtures/dummy-config.json''')
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Dict = 0
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: int ) -> Tuple:
UpperCAmelCase_ : Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase_ : List[str] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCAmelCase_ : str = WavaVecaFeatureExtractor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ : Any = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Any ) -> List[Any]:
with self.assertRaisesRegex(
lowerCamelCase_ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def A__ ( self: Tuple ) -> int:
with self.assertRaisesRegex(
lowerCamelCase_ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCAmelCase_ : List[str] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ,revision="""aaaaaa""" )
def A__ ( self: str ) -> int:
with self.assertRaisesRegex(
lowerCamelCase_ ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
UpperCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def A__ ( self: Tuple ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Any = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" ,trust_remote_code=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" ,trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
def A__ ( self: Dict ) -> Dict:
try:
AutoConfig.register("""custom""" ,lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ ,lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoFeatureExtractor.register(lowerCamelCase_ ,lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ : int = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def A__ ( self: Any ) -> Optional[Any]:
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = True
try:
AutoConfig.register("""custom""" ,lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ ,lowerCamelCase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Any = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" ,trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" ,trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase_ ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
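        # Build random input ids, masks, and label tensors sized by the tester's hyper-parameters.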
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
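                # Pretraining-style heads need token-level labels plus a sequence-level next-sentence label.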
UpperCAmelCase_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[str] = MobileBertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
return torch.tensor(
_a , dtype=torch.long , device=_a , )
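# Relative tolerance for the ratio-based comparison in the integration test below.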
UpperCamelCase_ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0]
UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = torch.tensor(
[
[
[-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
[-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
[2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
]
] ,device=lowerCamelCase_ ,)
        # MobileBERT results range from 1e0 to 1e8. Even a 0.0000001% relative difference on a value of 1e8
        # results in an absolute difference of ~1, so it is not a good idea to measure closeness using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 345 | 1 |
from typing import Any
class Node:
    '''simple docstring'''
    def __init__( self ,data: Any ) -> None:
        self.data = data
        self.next = None
class LinkedList:
    '''simple docstring'''
    def __init__( self ) -> None:
        self.head = None
    def print_list( self ) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data ,end=""" """ )
            temp = temp.next
        print()
    def push( self ,new_data: Any ) -> None:
        # Prepend a new node at the head of the list.
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self ,node_data_a: Any ,node_data_b: Any ) -> None:
        # Swapping a value with itself is a no-op.
        if node_data_a == node_data_b:
            return
        # Locate the first node carrying each value.
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        # If either value is absent, leave the list unchanged.
        if node_a is None or node_b is None:
            return
        # Swap the payloads rather than relinking the nodes.
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 345 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
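        # Round-trip the processor together with its speaker-embeddings dictionary and check the vocab survives.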
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
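        # Build a minimal synthetic voice preset with the semantic, coarse, and fine prompt arrays a Bark
        # history prompt contains.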
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 | 1 |
import math
class Graph:
    '''simple docstring'''
    def __init__( self ,n: int = 0 ) -> None:  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 ,n )] for i in range(0 ,n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 ,n )] for i in range(0 ,n )
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0 ,n ):
            self.dp[i][i] = 0  # a node is at distance zero from itself
    def add_edge( self ,u: int ,v: int ,w: float ) -> None:
        self.dp[u][v] = w
    def floyd_warshall( self ) -> None:
        # Classic relaxation: allow each vertex k in turn as an intermediate hop.
        for k in range(0 ,self.n ):
            for i in range(0 ,self.n ):
                for j in range(0 ,self.n ):
                    self.dp[i][j] = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] )
    def show_min( self ,u: int ,v: int ) -> float:
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
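        # Generation runs in the background thread; the streamer yields decoded text chunks as they arrive.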
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
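        # The prompt consists only of BOS (special) tokens, which the streamer should skip when decoding.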
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 345 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCamelCase_ = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
UpperCamelCase_ = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
UpperCamelCase_ = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
UpperCamelCase_ = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
def A__ ( self: List[str] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] ,reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] ,)
def A__ ( self: Optional[int] ,lowerCamelCase_: List[Any] ) -> Tuple:
import nltk
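        # METEOR needs WordNet at runtime; newer NLTK versions additionally need punkt and omw-1.4.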
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def A__ ( self: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str]=0.9 ,lowerCamelCase_: Dict=3 ,lowerCamelCase_: int=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("""3.6.5""" ):
UpperCAmelCase_ : Optional[Any] = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase_ ) ,word_tokenize(lowerCamelCase_ ) ,alpha=lowerCamelCase_ ,beta=lowerCamelCase_ ,gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
else:
UpperCAmelCase_ : Tuple = [
meteor_score.single_meteor_score(lowerCamelCase_ ,lowerCamelCase_ ,alpha=lowerCamelCase_ ,beta=lowerCamelCase_ ,gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
return {"meteor": np.mean(lowerCamelCase_ )}
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
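        # A deliberately tiny UNet config so the unit test runs quickly without a GPU.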
UpperCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_uncond_unet
UpperCAmelCase_ : List[Any] = DDIMScheduler()
UpperCAmelCase_ : List[Any] = self.dummy_vq_model
UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 | 1 |
from __future__ import annotations
def p_series( nth_term: int | float | str ,power: int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else """1""" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 345 |
def topological_sort( graph ):
    '''simple docstring'''
    # Kahn's algorithm: repeatedly pop vertices whose in-degree has dropped to zero.
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 345 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase_ ( _a : Tuple , _a : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : Union[str, Any] = strtobool(_a )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
UpperCamelCase_ = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase_ = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase_ = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase_ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase_ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
UpperCAmelCase_ : Tuple = unittest.skip("""test requires faiss""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
UpperCAmelCase_ : int = unittest.skip("""test requires regex""" )(_a )
return test_case
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
UpperCAmelCase_ : List[str] = unittest.skip("""test requires elasticsearch""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
UpperCAmelCase_ : Optional[Any] = unittest.skip("""test requires sqlalchemy""" )(_a )
return test_case
def lowerCamelCase_ ( _a : int ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
UpperCAmelCase_ : Optional[Any] = unittest.skip("""test requires PyTorch""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
if not config.TF_AVAILABLE:
UpperCAmelCase_ : Union[str, Any] = unittest.skip("""test requires TensorFlow""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Tuple ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
UpperCAmelCase_ : Dict = unittest.skip("""test requires JAX""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
UpperCAmelCase_ : Any = unittest.skip("""test requires Pillow""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Optional[int] ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_a )
else:
return test_case
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_a )
else:
return test_case
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_a )
else:
return test_case
def lowerCamelCase_ ( _a : Tuple ):
'''simple docstring'''
def _require_spacy_model(_a : List[Any] ):
try:
import spacy # noqa F401
spacy.load(_a )
except ImportError:
return unittest.skip("""test requires spacy""" )(_a )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_a ) )(_a )
else:
return test_case
return _require_spacy_model
def lowerCamelCase_ ( _a : Tuple ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_a )
else:
return test_case
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_a )
else:
return test_case
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
UpperCAmelCase_ : str = unittest.skip("""test is slow""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Tuple ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
UpperCAmelCase_ : int = unittest.skip("""test is local""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCAmelCase_ : Union[str, Any] = unittest.skip("""test is packaged""" )(_a )
return test_case
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
UpperCAmelCase_ : Tuple = unittest.skip("""test requires remote""" )(_a )
return test_case
def lowerCamelCase_ ( *_a : Optional[int] ):
'''simple docstring'''
def decorate(cls : List[Any] ):
for name, fn in cls.__dict__.items():
if callable(_a ) and name.startswith("""test""" ):
for decorator in decorators:
UpperCAmelCase_ : int = decorator(_a )
setattr(cls , _a , _a )
return cls
return decorate
class _snake_case ( __snake_case ):
'''simple docstring'''
pass
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Dict = 0
A__ : List[Any] = 1
A__ : Optional[Any] = 2
@contextmanager
def lowerCamelCase_ ( _a : List[Any]=OfflineSimulationMode.CONNECTION_FAILS , _a : int=1E-16 ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = requests.Session().request
def timeout_request(_a : Optional[Any] , _a : Dict , _a : Dict , **_a : Tuple ):
# Change the url to an invalid url so that the connection hangs
UpperCAmelCase_ : int = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
UpperCAmelCase_ : Optional[int] = timeout
try:
return online_request(_a , _a , **_a )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCAmelCase_ : Tuple = url
UpperCAmelCase_ : str = e.args[0]
UpperCAmelCase_ : Optional[int] = (max_retry_error.args[0].replace("""10.255.255.1""" , F'''OfflineMock[{url}]''' ),)
UpperCAmelCase_ : Any = (max_retry_error,)
raise
def raise_connection_error(_a : str , _a : Optional[Any] , **_a : Tuple ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_a )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _a ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _a ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _a ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def lowerCamelCase_ ( *_a : List[Any] , **_a : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_a , **_a ) as tmp_dir:
try:
os.chdir(_a )
yield
finally:
os.chdir(_a )
@contextmanager
def lowerCamelCase_ ( ):
'''simple docstring'''
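    # Context manager asserting that Arrow memory allocation grows inside the wrapped block.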
import gc
gc.collect()
UpperCAmelCase_ : List[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCamelCase_ ( ):
'''simple docstring'''
import gc
gc.collect()
UpperCAmelCase_ : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCamelCase_ ( _a : Union[str, Any] , _a : Tuple ):
'''simple docstring'''
return deepcopy(_a ).integers(0 , 100 , 10 ).tolist() == deepcopy(_a ).integers(0 , 100 , 10 ).tolist()
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_a : List[Any] , *_a : Any , **_a : Optional[Any] ):
try:
return func(*_a , **_a )
except HTTPError as err:
if str(_a ).startswith("""500""" ) or str(_a ).startswith("""502""" ):
pytest.xfail(str(_a ) )
raise err
return decorator.decorator(_wrapper , _a )
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : List[Any] = stderr
async def lowerCamelCase_ ( _a : List[str] , _a : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Optional[Any] = await stream.readline()
if line:
callback(_a )
else:
break
async def lowerCamelCase_ ( _a : Optional[int] , _a : Optional[Any]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Any=False , _a : Union[str, Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(_a ) )
UpperCAmelCase_ : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_a , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
# XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="""stdout:""" ) ),
            _read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="""stderr:""" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
return result
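# A minimal usage sketch for the runner above: launch a worker script and
# inspect its captured, line-buffered output (the command shown is illustrative):
#
#     result = execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())
#     assert "ok" in result.stdout[0]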
def pytest_xdist_worker_id():
    '''simple docstring'''
    worker = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
    worker = re.sub(r"""^gw""" , """""" , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port():
    '''simple docstring'''
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
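# A minimal sketch (assumed usage): deriving a collision-free rendezvous port
# per pytest-xdist worker so concurrent torch.distributed test runs don't clash:
#
#     os.environ["MASTER_PORT"] = str(get_torch_dist_unique_port())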
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,encoder_stride=32 ,**kwargs ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
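# Illustrative arithmetic for the hidden_size derivation above: with the default
# embed_dim=96 and depths=[2, 2, 6, 2] (four stages), the channel dimension after
# the last stage is int(96 * 2 ** (4 - 1)) == 768.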
| 345 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = LEDTokenizer
A__ : str = LEDTokenizerFast
A__ : str = True
def A__ ( self: str ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase_ : Dict = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[Any] ,**lowerCamelCase_: Union[str, Any] ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: List[Any] ) -> int:
return "lower newer", "lower newer"
@cached_property
def A__ ( self: List[Any] ) -> Any:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def A__ ( self: Tuple ) -> List[Any]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def A__ ( self: int ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCAmelCase_ : Dict = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,max_length=len(lowerCamelCase_ ) ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
UpperCAmelCase_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
@require_torch
def A__ ( self: int ) -> Any:
UpperCAmelCase_ : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : List[Any] = tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,lowerCamelCase_ )
self.assertIn("""attention_mask""" ,lowerCamelCase_ )
self.assertNotIn("""labels""" ,lowerCamelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,lowerCamelCase_ )
@require_torch
def A__ ( self: Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Optional[int] = tokenizer(text_target=lowerCamelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def A__ ( self: Tuple ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Union[str, Any] = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 5122) )
@require_torch
def A__ ( self: Any ) -> Any:
UpperCAmelCase_ : List[str] = ["""A long paragraph for summarization."""]
UpperCAmelCase_ : List[Any] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Tuple = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : List[str] = tokenizer(text_target=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : str = inputs["""input_ids"""]
UpperCAmelCase_ : int = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def A__ ( self: List[Any] ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Union[str, Any] = ["""Summary of the text.""", """Another summary."""]
UpperCAmelCase_ : Any = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase_ : Optional[Any] = tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ )
UpperCAmelCase_ : Any = [[0] * len(lowerCamelCase_ ) for x in encoded_output["""input_ids"""]]
UpperCAmelCase_ : Dict = tokenizer.pad(lowerCamelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,lowerCamelCase_ )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: Dict ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : int = """A, <mask> AllenNLP sentence."""
UpperCAmelCase_ : List[str] = tokenizer_r.encode_plus(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = tokenizer_p.encode_plus(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
UpperCAmelCase_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCamelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 345 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file ,"""wb""" ) as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,f )
            UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
| 345 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    '''simple docstring'''
    def __init__( self ,group: int = 14 ) -> None:
        if group not in primes:
            raise ValueError("""Unsupported Group""" )
        self.prime = primes[group]["""prime"""]
        self.generator = primes[group]["""generator"""]
        self.__private_key = int(hexlify(urandom(32 ) ) ,base=16 )
    def get_private_key( self ) -> str:
        return hex(self.__private_key )[2:]
    def generate_public_key( self ) -> str:
        public_key = pow(self.generator ,self.__private_key ,self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self ,key: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key ,(self.prime - 1) // 2 ,self.prime ) == 1
        )
    def generate_shared_key( self ,other_key_str: str ) -> str:
        other_key = int(other_key_str ,base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(other_key ,self.__private_key ,self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str: int ,prime: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str ,(prime - 1) // 2 ,prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str: str ,remote_public_key_str: str ,group: int = 14 ) -> str:
        local_private_key = int(local_private_key_str ,base=16 )
        remote_public_key = int(remote_public_key_str ,base=16 )
        prime = primes[group]["""prime"""]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key ,prime ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(remote_public_key ,local_private_key ,prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
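# A minimal round-trip sketch (the `alice`/`bob` names are illustrative): both
# parties derive the same shared secret from each other's public keys.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )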
| 345 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
| 345 | 1 |
from math import pow
def backtrack( needed_sum: int ,power: int ,current_number: int ,current_sum: int ,solutions_count: int ,):
    '''simple docstring'''
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number ,power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum ,power ,current_number + 1 ,current_sum ,solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum ,power ,current_number + 1 ,current_sum ,solutions_count )
    return current_sum, solutions_count
def lowerCamelCase_ ( needed_sum: int ,power: int ):
    '''simple docstring'''
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            """Invalid input\n"""
            """needed_sum must be between 1 and 1000, power between 2 and 10.""" )
    return backtrack(needed_sum ,power ,1 ,0 ,0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
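# Worked example (checked by hand): 13 has exactly one decomposition into squares
# of distinct natural numbers, 13 = 2**2 + 3**2, so the solver above returns 1
# for needed_sum=13, power=2.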
| 345 |
import random
from typing import Any
def fisher_yates_shuffle( _a : list ):
    '''simple docstring'''
    for _ in range(len(_a ) ):
        a = random.randint(0 , len(_a ) - 1 )
        b = random.randint(0 , len(_a ) - 1 )
        _a[a], _a[b] = _a[b], _a[a]
    return _a
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
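# For reference, a sketch of the classic Fisher-Yates formulation (the variant
# above swaps two uniformly random positions per pass; the classic version below
# walks the list backwards and provably yields an unbiased permutation):
#
# def fisher_yates_shuffle_classic(data: list) -> list:
#     for i in range(len(data) - 1, 0, -1):
#         j = random.randint(0, i)
#         data[i], data[j] = data[j], data[i]
#     return data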
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
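# A minimal sketch of what the lazy module above buys: importing the package is
# cheap, and the heavyweight classes are only materialized on first attribute
# access, e.g.
#
#     from transformers.models.layoutxlm import LayoutXLMProcessor  # triggers the real import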
| 345 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
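# Shape sketch for the skip connection above (these Flax blocks are channels-last):
# a (B, H, W, C_up) activation is concatenated with the matching (B, H, W, C_skip)
# down-block residual along axis=-1 before being fed to each resnet, which is why
# the resnets are built with in_channels = resnet_in_channels + res_skip_channels.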
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
| 345 | 1 |